Example No. 1
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
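For context, the reference taken above is expected to be dropped by the reaper thread once it has processed the queued task. A minimal sketch of that consumer side (simplified, not the actual mm/oom_kill.c code; it reuses the queue names from this example):

static void oom_reap_queued_task_sketch(void)
{
	struct task_struct *tsk;

	spin_lock(&oom_reaper_lock);
	tsk = oom_reaper_list;			/* pop the head of the queue */
	if (tsk)
		oom_reaper_list = tsk->oom_reaper_list;
	spin_unlock(&oom_reaper_lock);

	if (!tsk)
		return;

	/* ... reap the task's address space here ... */

	put_task_struct(tsk);			/* balances get_task_struct() in wake_oom_reaper() */
}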
Example No. 2
/**
 * @brief PPP callback to retrieve current process info for the running OS.
 */
void on_get_current_process(CPUState *env, OsiProc **out_p) {
	OsiProc *p = NULL;
	PTR ts;

	//	target_long asid = panda_current_asid(env);
	ts = get_task_struct(env, (_ESP & THREADINFO_MASK));
	if (ts) {
		// Valid task struct: got a reasonable-looking process,
		// so allocate an OsiProc, fill it in, and return it.
		p = (OsiProc *)g_malloc0(sizeof(OsiProc));
		fill_osiproc(env, p, ts);
	}
	*out_p = p;
}
Example No. 3
/*
 * interruptibly wait for a token to be granted from a semaphore
 * - entered with lock held and interrupts disabled
 */
int __down_interruptible(struct semaphore *sem, unsigned long flags)
{
	struct task_struct *tsk = current;
	struct sem_waiter waiter;
	int ret;

	semtrace(sem, "Entering __down_interruptible");

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	set_task_state(tsk, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the semaphore */
	ret = 0;
	for (;;) {
		if (list_empty(&waiter.list))
			break;
		if (unlikely(signal_pending(current)))
			goto interrupted;
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

 out:
	tsk->state = TASK_RUNNING;
	semtrace(sem, "Leaving __down_interruptible");
	return ret;

 interrupted:
	spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&waiter.list)) {
		list_del(&waiter.list);
		ret = -EINTR;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);
	if (ret == -EINTR)
		put_task_struct(current);
	goto out;
}
Example No. 4
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
Example No. 5
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		rt_mutex_dequeue_pi(owner, waiter);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(owner, next);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
Example No. 6
/**
 * @brief PPP callback to retrieve OsiModules from the running OS.
 *
 * Current implementation returns all the memory areas mapped by the
 * process and the files they were mapped from. Libraries that have
 * many mappings will appear multiple times.
 *
 * @todo Remove duplicates from results.
 */
void on_get_libraries(CPUState *env, OsiProc *p, OsiModules **out_ms) {
	PTR ts_first, ts_current;
	target_ulong current_pid;
	OsiModules *ms;
	OsiModule *m;
	uint32_t ms_capacity = 16;

	PTR vma_first, vma_current;

	// Find the process with the indicated pid.
	ts_first = ts_current = get_task_struct(env, (_ESP & THREADINFO_MASK));
	if (ts_current == (PTR)NULL) goto error0;

	do {
		if ((current_pid = get_pid(env, ts_current)) == p->pid) break;
		ts_current = get_task_struct_next(env, ts_current);
	} while(ts_current != (PTR)NULL && ts_current != ts_first);

	// memory read error or process not found
	if (ts_current == (PTR)NULL || current_pid != p->pid) goto error0;

	// Read the module info for the process.
	vma_first = vma_current = get_vma_first(env, ts_current);
	if (vma_current == (PTR)NULL) goto error0;

	ms = (OsiModules *)g_malloc0(sizeof(OsiModules));
	ms->module = g_new(OsiModule, ms_capacity);
	do {
		if (ms->num == ms_capacity) {
			ms_capacity *= 2;
			ms->module = g_renew(OsiModule, ms->module, ms_capacity);
		}

		m = &ms->module[ms->num++];
		memset(m, 0, sizeof(OsiModule));
		fill_osimodule(env, m, vma_current);

		vma_current = get_vma_next(env, vma_current);
	} while(vma_current != (PTR)NULL && vma_current != vma_first);

	*out_ms = ms;
	return;

error0:
	*out_ms = NULL;
	return;
}
Example No. 7
void masq_get_state_all(struct bproc_masq_master_t *m) {
    long this_update;
    struct task_struct *p = 0;
    struct list_head *l;
    struct bproc_krequest_t *req;
    struct bproc_status_msg_t *msg;

    this_update = jiffies;

    do {
	req = bproc_new_req(BPROC_RESPONSE(BPROC_GET_STATUS),
			    sizeof(*msg), GFP_KERNEL);
	if (!req) {
	    printk("bproc: masq_get_state_all: out of memory.\n");
	    return;
	}
	msg = (struct bproc_status_msg_t *)bproc_msg(req);

	/* This could be made more efficient by moving the head of the
	 * list after each one... */

	read_lock(&tasklist_lock);

	for (l = m->proc_list.next; l != &m->proc_list; l = l->next) {
	    p = list_entry(l, struct task_struct, bproc.list);
	    if (p->bproc.last_update < this_update) {
		p->bproc.last_update = this_update;
		get_task_struct(p);
		break;
	    }
	    p = 0;
	}
	read_unlock(&tasklist_lock);

	if (p) {
	    bpr_to_node(msg, -1);
	    bpr_from_real(msg, p->bproc.pid);
	    msg->hdr.result = 0;
	    pack_process_status(msg, p, 1);
	    bproc_send_req(&m->req, req);

	    put_task_struct(p);
	}
	bproc_put_req(req);
    } while (p);
}
Example No. 8
static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}
Example No. 9
// Add t to the monitored pids
moca_task Moca_AddTask(struct task_struct *t)
{
    task_data data;
    moca_task tsk;
    struct _moca_task tmptsk;
    int status;
    printk("Moca monitoring task %p [%s]\n", t, t->comm);


    //Create the task data

    get_task_struct(t);
    tmptsk.key=t;
    write_lock(&Moca_tasksLock);
    tsk=(moca_task)Moca_AddToMap(Moca_tasksMap,(hash_entry)&tmptsk,&status);
    switch(status)
    {
        case MOCA_HASHMAP_FULL:
            printk(KERN_NOTICE "Moca too many tasks, ignoring %p\n", t);
            goto fail;
        case MOCA_HASHMAP_ERROR:
            printk("Moca unhandled hashmap error\n");
            goto fail;
        case MOCA_HASHMAP_ALREADY_IN_MAP:
            MOCA_DEBUG_PRINT("Moca Adding an already existing task %p\n", t);
            // Drop the lock before returning the existing entry
            write_unlock(&Moca_tasksLock);
            return tsk;
        default:
            //normal add
            MOCA_DEBUG_PRINT("Moca Added task %p at pos %d \n", t, status);
            break;
    }
    // Here we are sure that t has been added to the map
    data=Moca_InitData(t);
    if(!data)
    {
        Moca_RemoveTask(t);
        goto fail;
    }
    tsk->data=data;
    tsk->touched=0;
    write_unlock(&Moca_tasksLock);
    return tsk;
fail:
    write_unlock(&Moca_tasksLock);
    return NULL;
}
Example No. 10
void __vperfctr_release(struct task_struct *child_tsk)
{
	struct task_struct *parent_tsk = child_tsk->parent;
	struct vperfctr *child_perfctr = child_tsk->thread.perfctr;

	// This is invoked either from waitpid() or when the parent is not
	// interested in its children. In the latter case, "parent_tsk != current".

	// One releases oneself when the parent is not interested in one's data,
	// but even then we would like to add our counters to the parent's.

	// Another step towards freeing the task_struct.
	child_tsk->thread.perfctr = NULL;

	// if the parent is releasing the children's task structure, then it (the
	// parent) can go ahead and add the children's vperfctr's values to the
	// 'children' field in the parent's 'vperfctr' structure.
	// So, am 'I' the parent of the task_structure I am attempting to release?
	// When current == parent_tsk, the child's counts can be merged
	// into the parent's immediately. This is the common case.

	// printk ("%s, %d\n", __FUNCTION__, __LINE__);
	if (child_perfctr == NULL) {
		// printk("%s, %d, child_perfctr == NULL\n", __FUNCTION__, __LINE__);
	}

	if (parent_tsk == current)
		do_vperfctr_release(child_perfctr, parent_tsk);
	else {

		/* When current != parent_tsk, the parent must be task_lock()ed
		 * before its perfctr state can be accessed. task_lock() is illegal
		 * here due to the write_lock_irq(&tasklist_lock) in release_task(),
		 * so the operation is done via schedule_work(). Also, increment
		 * the reference count of parent's task_struct so that it will not be
		 * freed for good
		 */

		get_task_struct(parent_tsk);	// increments the reference count

		INIT_WORK(&child_perfctr->work, scheduled_release);
		child_perfctr->parent_tsk = parent_tsk;
		schedule_work(&child_perfctr->work);
	}
}
Example No. 11
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}
Example No. 12
static int completion_load(void)
{
	pr_info("%s\n", __func__);

	my_thread = kthread_create(kthread_func, NULL, "%s",
							"udemy kthread");
	if (IS_ERR(my_thread)) {
		pr_info("%s: cannot create kernel thread\n", __func__);
		return PTR_ERR(my_thread);
	}

	/* Protect my_thread from being freed by the kernel */
	get_task_struct(my_thread);

	/* Kick the thread */
	wake_up_process(my_thread);

	return 0;
}
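The reference taken above has to be balanced when the module goes away. A minimal unload sketch, assuming the same my_thread and a kthread_func that honours kthread_should_stop():

static void completion_unload(void)
{
	int ret;

	/* Ask the thread to exit and wait for it; the reference taken in
	 * completion_load() keeps the task_struct valid across this call. */
	ret = kthread_stop(my_thread);
	pr_info("%s: kthread exited with %d\n", __func__, ret);

	/* Drop the reference taken in completion_load() */
	put_task_struct(my_thread);
}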
Example No. 13
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
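As the comment says, the reference is not dropped here: rt_mutex_adjust_prio_chain() releases it once the chain walk terminates. Roughly, in kernels of this vintage, the tail of that function looks like this (abridged sketch, not the full implementation):

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);	/* drops the reference taken by the caller */

	return ret;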
Example No. 14
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
#ifdef CONFIG_BRCM_DEBUG_RWSEM
		sem->wr_owner = current;
#endif
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
Example No. 15
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	/*
	 * Tracing init is not allowed.
	 */
	if (pid == 1)
		return ERR_PTR(-EPERM);

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}
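A typical caller checks for ERR_PTR() and drops the reference when the operation is done; a minimal usage sketch, given some pid (the surrounding ptrace logic is omitted):

	struct task_struct *child;

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* ... perform the ptrace operation on child ... */

	put_task_struct(child);	/* balances get_task_struct() taken above */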
Example No. 16
void __vperfctr_release(struct task_struct *child_tsk)
{

#if 0
	struct task_struct *parent_tsk = child_tsk->parent;
	struct vperfctr *child_perfctr = child_tsk->arch.thread.perfctr;

	child_tsk->arch.thread.perfctr = NULL;
	if (parent_tsk == current)
		do_vperfctr_release(child_perfctr, parent_tsk);
	else {
		get_task_struct(parent_tsk);

		INIT_WORK(&child_perfctr->work, scheduled_release);

		child_perfctr->parent_tsk = parent_tsk;
		schedule_work(&child_perfctr->work);
	}
#endif 

}
Example No. 17
/*
 * hps task control interface
 */
int hps_task_start(void)
{
	struct sched_param param = {.sched_priority = HPS_TASK_PRIORITY };

	if (hps_ctxt.tsk_struct_ptr == NULL) {
		hps_ctxt.tsk_struct_ptr = kthread_create(_hps_task_main, NULL, "hps_main");
		if (IS_ERR(hps_ctxt.tsk_struct_ptr))
			return PTR_ERR(hps_ctxt.tsk_struct_ptr);

		sched_setscheduler_nocheck(hps_ctxt.tsk_struct_ptr, SCHED_FIFO, &param);
		get_task_struct(hps_ctxt.tsk_struct_ptr);
		wake_up_process(hps_ctxt.tsk_struct_ptr);
		hps_warn("hps_task_start success, ptr: %p, pid: %d\n",
			 hps_ctxt.tsk_struct_ptr, hps_ctxt.tsk_struct_ptr->pid);
	} else {
		hps_warn("hps task already exist, ptr: %p, pid: %d\n",
			 hps_ctxt.tsk_struct_ptr, hps_ctxt.tsk_struct_ptr->pid);
	}

	return 0;
}
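A matching stop routine would be expected to end the thread and drop the reference; a hypothetical sketch (no such function is shown in the original):

int hps_task_stop(void)
{
	if (hps_ctxt.tsk_struct_ptr == NULL)
		return 0;

	kthread_stop(hps_ctxt.tsk_struct_ptr);
	/* Drop the reference taken in hps_task_start() */
	put_task_struct(hps_ctxt.tsk_struct_ptr);
	hps_ctxt.tsk_struct_ptr = NULL;

	return 0;
}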
Example No. 18
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
Example No. 19
static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}
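Both references taken here (on the object and on current) are the worker's responsibility to release; schematically, the end of the worker does something like the following (heavily abridged sketch, not the actual i915 worker):

static void userptr_worker_teardown_sketch(struct get_pages_work *work)
{
	/* ... get_user_pages() against work->task's mm happens earlier ... */

	/* Drop the references taken in __i915_gem_userptr_get_pages_schedule() */
	i915_gem_object_put(work->obj);
	put_task_struct(work->task);
	kfree(work);
}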
Example No. 20
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	__set_task_state(tsk, TASK_RUNNING);
 out:
	;
}
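The loop above spins on waiter.task rather than on the semaphore itself because the waker clears that field, wakes the task, and then drops the reference. A schematic of that waker side, simplified from the __rwsem_do_wake() pattern:

	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();		/* publish the removal before ... */
	waiter->task = NULL;	/* ... releasing the waiter, which lives on the sleeper's stack */
	wake_up_process(tsk);
	put_task_struct(tsk);	/* balances get_task_struct() in __down_read() */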
Example No. 21
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
Example No. 22
struct file *get_task_file(pid_t pid, int fd)
{
	int err;
	struct task_struct *tsk;
	struct files_struct *fs;
	struct file *file = NULL;

	err = -ESRCH;
	read_lock(&tasklist_lock);
	tsk = find_task_by_pid_ns(pid, get_exec_env()->ve_ns->pid_ns);
	if (tsk == NULL) {
		read_unlock(&tasklist_lock);
		goto out;
	}

	get_task_struct(tsk);
	read_unlock(&tasklist_lock);

	err = -EINVAL;
	fs = get_files_struct(tsk);
	if (fs == NULL)
		goto out_put;

	rcu_read_lock();
	err = -EBADF;
	file = fcheck_files(fs, fd);
	if (file == NULL)
		goto out_unlock;

	err = 0;
	get_file(file);

out_unlock:
	rcu_read_unlock();
	put_files_struct(fs);
out_put:
	put_task_struct(tsk);
out:
	return err ? ERR_PTR(err) : file;
}
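Callers receive either an ERR_PTR() or a file with an elevated reference count, so usage looks roughly like this, given a pid and fd:

	struct file *file;

	file = get_task_file(pid, fd);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... use the file ... */

	fput(file);	/* balances get_file() inside get_task_file() */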
Example No. 23
/*
 * Tries to push a -rtws task to a "random" idle rq.
 */
static int push_idle_rtws(struct rq *this_rq, struct task_struct *p)
{
    struct rq *target_rq;
    int ret = 0, target_cpu;
    struct cpudl *cp = &this_rq->rd->rtwsc_cpudl;

retry:
    target_cpu = find_idle_cpu_rtws(cp);

    if (target_cpu == -1)
        return 0;

    printk(KERN_INFO "idle cpu %d\n", target_cpu);

    target_rq = cpu_rq(target_cpu);

    /* We might release rq lock */
    get_task_struct(p);

    double_lock_balance(this_rq, target_rq);

    if (unlikely(target_rq->rtws.nr_running)) {
        double_unlock_balance(this_rq, target_rq);
        put_task_struct(p);
        target_rq = NULL;
        goto retry;
    }

    set_task_cpu(p, target_cpu);
    activate_task(target_rq, p, 0);

    ret = 1;
    resched_task(target_rq->curr);

    double_unlock_balance(this_rq, target_rq);
    put_task_struct(p);

    return ret;
}
Example No. 24
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
Example No. 25
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_anon_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
Example No. 26
static struct task_struct *vperfctr_get_tsk(struct vperfctr *perfctr)
{
	struct task_struct *tsk;

	tsk = current;
	if (perfctr != current->thread.perfctr) {
		/* this synchronises with vperfctr_unlink() and itself */
		spin_lock(&perfctr->owner_lock);
		tsk = perfctr->owner;
		if (tsk)
			get_task_struct(tsk);
		spin_unlock(&perfctr->owner_lock);
		if (tsk) {
			int ret = ptrace_check_attach(tsk, 0);
			if (ret < 0) {
				put_task_struct(tsk);
				return ERR_PTR(ret);
			}
		}
	}
	return tsk;
}
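A matching put helper only needs to undo the reference when the task is not current; a plausible counterpart, mirroring the get above:

static void vperfctr_put_tsk(struct task_struct *tsk)
{
	/* current was never reference-counted in vperfctr_get_tsk() */
	if (tsk != current)
		put_task_struct(tsk);
}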
Example No. 27
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'.  Returns -1 on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process(p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
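The caller has to treat the -1UL sentinel specially and eventually drop the reference taken on the chosen task; a minimal sketch of that caller-side handling (reusing oc and totalpages from the signature above):

	unsigned int points = 0;
	struct task_struct *victim;

	victim = select_bad_process(oc, &points, totalpages);
	if (victim == (struct task_struct *)(-1UL))
		return;		/* scan aborted */
	if (!victim)
		return;		/* nothing suitable found */

	/* ... kill the victim; whoever consumes it must eventually call ... */
	put_task_struct(victim);	/* balances get_task_struct() on the chosen task */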
Example No. 28
/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	struct task_struct *p = q->sleeper;
	get_task_struct(p);
	q->status = error;
	wake_up_process(p);
	put_task_struct(p);
#else
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->simple_list, pt);
#endif
}
Example No. 29
static int kthreads_load(void)
{
	int i;
	
	pr_info("%s\n", __func__);
	
	for (i = 0; i < NUM_KTHREADS; i++) {
		my_threads[i] = kthread_create(kthread_func, (void *)(unsigned long)i,
						"%s/%d", "kthread", i);
		if (IS_ERR(my_threads[i])) {
			pr_info("%s: cannot create kthread/%d\n", __func__, i);
			return PTR_ERR(my_threads[i]);
		}

		/* Protect my_threads[i] from being freed by the kernel */
		get_task_struct(my_threads[i]);

		/* Kick the thread */
		wake_up_process(my_threads[i]);
	}

	return 0;
}
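The matching unload stops each thread and drops each reference; a minimal sketch, assuming the same my_threads array and NUM_KTHREADS:

static void kthreads_unload(void)
{
	int i;

	for (i = 0; i < NUM_KTHREADS; i++) {
		if (IS_ERR_OR_NULL(my_threads[i]))
			continue;
		kthread_stop(my_threads[i]);
		/* Drop the reference taken in kthreads_load() */
		put_task_struct(my_threads[i]);
	}
}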
Example No. 30
/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
			struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}