static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(tr, cpu);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
Example #2
static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
Example #3
static void do_force_revoke(struct hw3d_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	pr_debug("hw3d: forcing revoke\n");
	locked_hw3d_irq_disable(info);
	if (info->client_task) {
		pr_info("hw3d: force revoke from pid=%d\n",
			info->client_task->pid);
		force_sig(SIGKILL, info->client_task);
		put_task_struct(info->client_task);
		info->client_task = NULL;
	}
	locked_hw3d_client_done(info, 1);
	pr_debug("hw3d: done forcing revoke\n");
	spin_unlock_irqrestore(&info->lock, flags);
}
Example #4
/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}
Example #5
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void*)message);
		mem_cgroup_put(oom_group);
	}
}
Example #6
static struct dentry *proc_lookupfd_common(struct inode *dir,
					   struct dentry *dentry,
					   instantiate_t instantiate)
{
	struct task_struct *task = get_proc_task(dir);
	struct dentry *result = ERR_PTR(-ENOENT);
	unsigned fd = name_to_int(dentry);

	if (!task)
		goto out_no_task;
	if (fd == ~0U)
		goto out;

	result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
out:
	put_task_struct(task);
out_no_task:
	return result;
}
Example #7
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
Example #8
/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	struct task_struct *task;
	int rv = generic_permission(inode, mask);

	if (task_tgid(current) == proc_pid(inode))
		rv = 0;

	task = get_proc_task(inode);
	if (task == NULL)
		return rv;

	if (gr_acl_handle_procpidmem(task))
		rv = -EACCES;

	put_task_struct(task);

	return rv;
}
Example #9
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret;

	trace_sched_kthread_stop(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
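The kernel-doc above implies the usage contract: the caller keeps the reference returned by kthread_create() until kthread_stop() consumes it. A minimal, hypothetical sketch (worker_fn and the surrounding names are illustrative, not from the source; assumes <linux/kthread.h> and <linux/delay.h>):

#include <linux/kthread.h>
#include <linux/delay.h>

static struct task_struct *worker;

/* Illustrative thread function: loops until kthread_stop() is called. */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... periodic work ... */
		msleep(100);
	}
	return 0;	/* becomes the return value of kthread_stop() */
}

static int start_worker(void)
{
	worker = kthread_create(worker_fn, NULL, "worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	wake_up_process(worker);
	return 0;
}

static void stop_worker(void)
{
	/* Wakes the thread, waits for worker_fn() to return, drops the task ref. */
	pr_info("worker exited with %d\n", kthread_stop(worker));
}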
Example #10
static int seq_show(struct seq_file *m, void *v)
{
	struct files_struct *files = NULL;
	int f_flags = 0, ret = -ENOENT;
	struct file *file = NULL;
	struct task_struct *task;

	task = get_proc_task(m->private);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);

	if (files) {
		int fd = proc_fd(m->private);

		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			struct fdtable *fdt = files_fdtable(files);

			f_flags = file->f_flags;
			if (close_on_exec(fd, fdt))
				f_flags |= O_CLOEXEC;

			get_file(file);
			ret = 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}

	if (!ret) {
		seq_printf(m, "pos:\t%lli\nflags:\t0%o\n",
			   (long long)file->f_pos, f_flags);
		if (file->f_op->show_fdinfo)
			ret = file->f_op->show_fdinfo(m, file);
		fput(file);
	}

	return ret;
}
Example #11
/*
 * handle the lock release when there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite) {
#ifdef CONFIG_BRCM_DEBUG_RWSEM
			sem->wr_owner = waiter->task;
#endif
			/* Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs. */
			wake_up_process(waiter->task);
		}
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	sem->activity += woken;

 out:
	return sem;
}
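The smp_mb() / waiter->task = NULL sequence above is one half of a handshake: each waiter spins on its stack-allocated waiter.task, so the waker must pin the task with a reference before clearing the field, after which the waiter's stack frame may be reused at any time. A sketch of the matching waiter side, modeled on the rwsem-spinlock slow path (simplified; lock handoff details elided):

	struct rwsem_waiter waiter;

	set_current_state(TASK_UNINTERRUPTIBLE);
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(current);	/* dropped by put_task_struct() in the waker */
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait to be granted the lock */
	for (;;) {
		if (!waiter.task)	/* waker cleared it after its smp_mb() */
			break;
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);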
Example #12
/*
 * Decrement the use count of the proc_dir_entry.
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;
	struct task_struct *tsk;

	/* Let go of any associated process */
	tsk = PROC_I(inode)->task;
	if (tsk)
		put_task_struct(tsk);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	clear_inode(inode);
}
Example #13
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
Example #14
static int proc_readfd_common(struct file *file, struct dir_context *ctx,
			      instantiate_t instantiate)
{
	struct task_struct *p = get_proc_task(file_inode(file));
	struct files_struct *files;
	unsigned int fd;

	if (!p)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;
	files = get_files_struct(p);
	if (!files)
		goto out;

	rcu_read_lock();
	for (fd = ctx->pos - 2;
	     fd < files_fdtable(files)->max_fds;
	     fd++, ctx->pos++) {
		char name[PROC_NUMBUF];
		int len;

		if (!fcheck_files(files, fd))
			continue;
		rcu_read_unlock();

		len = snprintf(name, sizeof(name), "%u", fd);
		if (!proc_fill_cache(file, ctx,
				     name, len, instantiate, p,
				     (void *)(unsigned long)fd))
			goto out_fd_loop;
		cond_resched();
		rcu_read_lock();
	}
	rcu_read_unlock();
out_fd_loop:
	put_files_struct(files);
out:
	put_task_struct(p);
	return 0;
}
Example #15
int monitor_fn(void *unused)
{
	tm->target_task = get_pid_task(tm->target_pid, PIDTYPE_PID);

	while (tm->target_task && pid_alive(tm->target_task)
	       && !kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		pr_info("taskmonitor: pid %d\tusr %d\tsys %d\n",
			tm->target_task->pid,
			(int)tm->target_task->utime,
			(int)tm->target_task->stime);
	}

	if (tm->target_task)
		put_task_struct(tm->target_task);	/* drop the get_pid_task() ref */
	else
		pr_warn("monitor_fn: target task is no longer alive!\n");

	return 0;
}
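The fragment above does not compile on its own: tm is module-global state that the surrounding module must define. A plausible minimal definition, inferred from the calls (get_pid_task() takes a struct pid *), might be:

/* Hypothetical state assumed by monitor_fn(); names inferred from usage. */
struct task_monitor {
	struct pid *target_pid;		/* e.g. from find_get_pid() at module init */
	struct task_struct *target_task;
};

static struct task_monitor *tm;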
Example #16
/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:          thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);

	if (kthread) {
		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		/*
		 * We clear the IS_PARKED bit here as we don't wait
		 * until the task has left the park code. So if we'd
		 * park before that happens we'd see the IS_PARKED bit
		 * which might be about to be cleared.
		 */
		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
				__kthread_bind(k, kthread->cpu);
			wake_up_process(k);
		}
	}
	put_task_struct(k);
}
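A park/unpark round trip only works if the thread function cooperates by calling kthread_parkme(). A hedged sketch of both sides (percpu_worker and reconfigure are illustrative names, not from the source):

static int percpu_worker(void *arg)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* blocks here until kthread_unpark() */
		/* ... per-cpu work ... */
	}
	return 0;
}

static void reconfigure(struct task_struct *worker)
{
	kthread_park(worker);	/* returns once the thread is parked */
	/* worker is guaranteed not to be in its work loop here */
	kthread_unpark(worker);	/* rebinds a percpu thread, then wakes it */
}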
Example #17
static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct inode *inode = d_inode(dentry);
	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
	struct task_struct *task;
	char name[50];
	int res = -EACCES;

	task = get_proc_task(inode);
	if (!task)
		return res;

	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		res = ns_get_name(name, sizeof(name), task, ns_ops);
		if (res >= 0)
			res = readlink_copy(buffer, buflen, name);
	}
	put_task_struct(task);
	return res;
}
Example #18
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;
#ifdef CONFIG_CCSECURITY
	if (ccs_ptrace_permission(request, pid))
		return -EPERM;
#endif

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
Example #19
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}
Example #20
static int tegra_overlay_release(struct inode *inode, struct file *filp)
{
	struct overlay_client *client = filp->private_data;
	unsigned long flags;
	int i;

	mutex_lock(&client->dev->overlays_lock);
	for (i = 0; i < client->dev->dc->n_windows; i++)
		if (client->dev->overlays[i].owner == client)
			tegra_overlay_put_locked(client, i);
	mutex_unlock(&client->dev->overlays_lock);

	spin_lock_irqsave(&client->dev->clients_lock, flags);
	list_del(&client->list);
	spin_unlock_irqrestore(&client->dev->clients_lock, flags);

	put_task_struct(client->task);

	kfree(client);
	return 0;
}
Example #21
struct file *get_task_file(pid_t pid, int fd)
{
	int err;
	struct task_struct *tsk;
	struct files_struct *fs;
	struct file *file = NULL;

	err = -ESRCH;
	read_lock(&tasklist_lock);
	tsk = find_task_by_pid_ns(pid, get_exec_env()->ve_ns->pid_ns);
	if (tsk == NULL) {
		read_unlock(&tasklist_lock);
		goto out;
	}

	get_task_struct(tsk);
	read_unlock(&tasklist_lock);

	err = -EINVAL;
	fs = get_files_struct(tsk);
	if (fs == NULL)
		goto out_put;

	rcu_read_lock();
	err = -EBADF;
	file = fcheck_files(fs, fd);
	if (file == NULL)
		goto out_unlock;

	err = 0;
	get_file(file);

out_unlock:
	rcu_read_unlock();
	put_files_struct(fs);
out_put:
	put_task_struct(tsk);
out:
	return err ? ERR_PTR(err) : file;
}
Example #22
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
Example #23
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;
	cpumask_t cpu_mask = CPU_MASK_NONE;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it */
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, cpu_mask);
	send_ipi(IPI_BOOT, 0, cpu_mask);
	cpu_clear(cpuid, cpu_online_map);

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}
Example #24
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
Example #25
static struct task_struct *vperfctr_get_tsk(struct vperfctr *perfctr)
{
	struct task_struct *tsk;

	tsk = current;
	if (perfctr != current->thread.perfctr) {
		/* this synchronises with vperfctr_unlink() and itself */
		spin_lock(&perfctr->owner_lock);
		tsk = perfctr->owner;
		if (tsk)
			get_task_struct(tsk);
		spin_unlock(&perfctr->owner_lock);
		if (tsk) {
			int ret = ptrace_check_attach(tsk, 0);
			if (ret < 0) {
				put_task_struct(tsk);
				return ERR_PTR(ret);
			}
		}
	}
	return tsk;
}
Example #26
/*
 * return timer owned by the process, used by exit_itimers
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	put_task_struct(timer->it_process);
	timer->it_process = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
Example #27
/*
 * release a single token back to a semaphore
 * - entered with lock held and interrupts disabled
 */
void __up(struct semaphore *sem)
{
	struct task_struct *tsk;
	struct sem_waiter *waiter;

	semtrace(sem, "Entering __up");

	/* grant the token to the process at the front of the queue */
	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del_init(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);

	semtrace(sem, "Leaving __up");
}
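The put_task_struct() here balances a reference taken on the down side when the sleeper queued itself, the same pairing as in the rwsem examples above. In sketch form, the enqueue looks roughly like:

	/* down() slow path (sketch): pin the task before exposing the waiter. */
	waiter.task = current;
	get_task_struct(current);	/* __up() drops this after the wakeup */
	list_add_tail(&waiter.list, &sem->wait_list);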
Example #28
/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list of queue entries to be woken later (non-RT only)
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	struct task_struct *p = q->sleeper;
	get_task_struct(p);
	q->status = error;
	wake_up_process(p);
	put_task_struct(p);
#else
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->simple_list, pt);
#endif
}
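In the non-RT branch the entries are only queued on pt; the actual wakeups happen later, outside the semaphore lock. In ipc/sem.c of that era the counterpart is wake_up_sem_queue_do(), which looks roughly like:

static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* q can disappear as soon as q->status is written */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}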
Example #29
static void
stapiu_decrement_process_semaphores(struct stapiu_process *p,
				    struct list_head *consumers)
{
	struct task_struct *task;
	rcu_read_lock();

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	/* We'd like to call find_task_by_pid_ns() here, but it isn't
	 * exported.  So, we call what it calls...  */
	task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
#else
	task = find_task_by_pid(p->tgid);
#endif

	/* The task may have exited while we weren't watching.  */
	if (task) {
		struct stapiu_consumer *c;

		/* Holding the rcu read lock makes us atomic, and we
		 * can't write userspace memory while atomic (which
		 * could pagefault).  So, instead we lock the task
		 * structure, then release the rcu read lock. */
		get_task_struct(task);
		rcu_read_unlock();

		list_for_each_entry(c, consumers, target_consumer) {
			if (c->sdt_sem_offset) {
				unsigned long addr = p->base + c->sdt_sem_offset;
				stapiu_write_task_semaphore(task, addr,
						(unsigned short) -1);
			}
		}
		put_task_struct(task);
	}
	else {
		/* No task found: we still hold the rcu read lock; release it. */
		rcu_read_unlock();
	}
}
Example #30
static const char *proc_ns_get_link(struct dentry *dentry,
				    struct inode *inode,
				    struct delayed_call *done)
{
	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
	struct task_struct *task;
	struct path ns_path;
	void *error = ERR_PTR(-EACCES);

	if (!dentry)
		return ERR_PTR(-ECHILD);

	task = get_proc_task(inode);
	if (!task)
		return error;

	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		error = ns_get_path(&ns_path, task, ns_ops);
		if (!error)
			nd_jump_link(&ns_path);
	}
	put_task_struct(task);
	return error;
}