Example #1
/* afs_osi_TimedSleep
 * 
 * Arguments:
 * event - event to sleep on
 * ams - max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout, EINTR if signalled, and EAGAIN if it might
 * have raced.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    long ticks = (ams * HZ / 1000) + 1;
    struct afs_event *evp;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    add_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_INTERRUPTIBLE);
    /* always sleep TASK_INTERRUPTIBLE to keep load average
     * from artificially increasing. */
    AFS_GUNLOCK();

    if (aintok) {
	if (schedule_timeout(ticks))
	    code = EINTR;
    } else
	schedule_timeout(ticks);
#ifdef CONFIG_PM
    if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
            test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
            test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	refrigerator(PF_FREEZE);
#else
	refrigerator();
#endif
#endif

    AFS_GLOCK();
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);

    return code;
}
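
For context, a minimal caller sketch; the function wait_for_flag and its flag argument are hypothetical, and it assumes the AFS global lock is held on entry, matching the AFS_GUNLOCK()/AFS_GLOCK() pairing above.

/* Sleep up to 250 ms for another thread to call afs_osi_Wakeup(flag);
 * hypothetical caller, GLOCK assumed held on entry. */
static int
wait_for_flag(char *flag)
{
    int code = afs_osi_TimedSleep(flag, 250, 1);	/* interruptible */
    if (code == EINTR)
	return code;	/* signalled: let the caller unwind */
    return 0;		/* timed out or woken normally */
}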
Example #2
/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received.  Returns EINTR if signaled, and 0 otherwise.
 */
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, retval;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    seq = evp->seq;
    retval = 0;

    add_wait_queue(&evp->cond, &wait);
    while (seq == evp->seq) {
	set_current_state(TASK_INTERRUPTIBLE);
	AFS_ASSERT_GLOCK();
	AFS_GUNLOCK();
	schedule();
#ifdef CONFIG_PM
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
            test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
            test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
#endif
	AFS_GLOCK();
	if (signal_pending(current)) {
	    retval = EINTR;
	    break;
	}
    }
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);
    return retval;
}
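
A hedged usage sketch: the req structure and its done field are hypothetical, and the loop assumes the caller holds the AFS global lock, which AFS_ASSERT_GLOCK() above enforces across the sleep.

    /* Wait until another thread sets req->done and calls
     * afs_osi_Wakeup(&req->done); bail out cleanly on a signal. */
    while (!req->done) {
	if (afs_osi_SleepSig(&req->done) == EINTR)
	    return EINTR;	/* interrupted: caller cleans up */
    }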
Example #3
/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}
Example #4
/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}
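
For reference, these helpers are reached from ARM's arch_ptrace(); a simplified sketch of that dispatch, with the surrounding switch abbreviated ("child" is the traced task, "datap" the user buffer from the ptrace data argument):

	switch (request) {
#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;
	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif
	}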
Example #5
noinline void
fcse_flush_all_done(unsigned seq, unsigned dirty)
{
	unsigned long flags;

	if (!cache_is_vivt())
		return;

	spin_lock_irqsave(&fcse_lock, flags);
#if defined(CONFIG_IPIPE)
	if (!test_ti_thread_flag(current_thread_info(), TIF_SWITCHED))
#elif defined(CONFIG_ARM_FCSE_PREEMPT_FLUSH)
	if (seq == nr_context_switches())
#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */
		fcse_clear_dirty_all();

	if (dirty && current->mm != &init_mm && current->mm) {
		unsigned fcse_pid =
			current->mm->context.fcse.pid >> FCSE_PID_SHIFT;
		__set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty);
	}

	spin_unlock_irqrestore(&fcse_lock, flags);
}
Example #6
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval) {
		read_unlock(&tasklist_lock);	/* still held on this path */
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
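
On MIPS MT kernels this function backs the ordinary sched_setaffinity(2) system call, so a plain userspace caller exercises the TIF_FPUBOUND handling above. A minimal userspace sketch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Pin the calling process to CPU 0.  For an FPU-bound task the kernel
 * side above may first intersect this mask with mt_fpu_cpumask. */
int pin_to_cpu0(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	if (sched_setaffinity(0 /* self */, sizeof(mask), &mask) < 0) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}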
Example #7
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
{
    struct thread_info	*ti;
    unsigned long		trampoline_addr;
    u32			status;
    u32			ctrl;
    int			code;

    status = ocd_read(DS);
    ti = current_thread_info();
    code = TRAP_BRKPT;

    pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
             status, regs->pc, regs->sr, ti->flags);

    if (!user_mode(regs)) {
        unsigned long	die_val = DIE_BREAKPOINT;

        if (status & (1 << OCD_DS_SSS_BIT))
            die_val = DIE_SSTEP;

        if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP)
                == NOTIFY_STOP)
            return regs;

        if ((status & (1 << OCD_DS_SWB_BIT))
                && test_and_clear_ti_thread_flag(
                    ti, TIF_BREAKPOINT)) {
            /*
             * Explicit breakpoint from trampoline or
             * exception/syscall/interrupt handler.
             *
             * The real saved regs are on the stack right
             * after the ones we saved on entry.
             */
            regs++;
            pr_debug("  -> TIF_BREAKPOINT done, adjusted regs:"
                     "PC=0x%08lx SR=0x%08lx\n",
                     regs->pc, regs->sr);
            BUG_ON(!user_mode(regs));

            if (test_thread_flag(TIF_SINGLE_STEP)) {
                pr_debug("Going to do single step...\n");
                return regs;
            }

            /*
             * No TIF_SINGLE_STEP means we're done
             * stepping over a syscall. Do the trap now.
             */
            code = TRAP_TRACE;
        } else if ((status & (1 << OCD_DS_SSS_BIT))
                   && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {

            pr_debug("Stepped into something, "
                     "setting TIF_BREAKPOINT...\n");
            set_ti_thread_flag(ti, TIF_BREAKPOINT);

            /*
             * We stepped into an exception, interrupt or
             * syscall handler. Some exception handlers
             * don't check for pending work, so we need to
             * set up a trampoline just in case.
             *
             * The exception entry code will undo the
             * trampoline stuff if it does a full context
             * save (which also means that it'll check for
             * pending work later.)
             */
            if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
                trampoline_addr
                    = (unsigned long)&debug_trampoline;

                pr_debug("Setting up trampoline...\n");
                ti->rar_saved = sysreg_read(RAR_EX);
                ti->rsr_saved = sysreg_read(RSR_EX);
                sysreg_write(RAR_EX, trampoline_addr);
                sysreg_write(RSR_EX, (MODE_EXCEPTION
                                      | SR_EM | SR_GM));
                BUG_ON(ti->rsr_saved & MODE_MASK);
            }

            /*
             * If we stepped into a system call, we
             * shouldn't do a single step after we return
             * since the return address is right after the
             * "scall" instruction we were told to step
             * over.
             */
            if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
                pr_debug("Supervisor; no single step\n");
                clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
            }

            ctrl = ocd_read(DC);
            ctrl &= ~(1 << OCD_DC_SS_BIT);
            ocd_write(DC, ctrl);

            return regs;
        } else {
            printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n",
                   status);
            printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags);
            die("Unhandled debug trap in kernel mode",
                regs, SIGTRAP);
        }
    } else if (status & (1 << OCD_DS_SSS_BIT)) {
        /* Single step in user mode */
        code = TRAP_TRACE;

        ctrl = ocd_read(DC);
        ctrl &= ~(1 << OCD_DC_SS_BIT);
        ocd_write(DC, ctrl);
    }

    pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
             code, regs->pc, regs->sr);

    clear_thread_flag(TIF_SINGLE_STEP);
    _exception(SIGTRAP, regs, code, instruction_pointer(regs));

    return regs;
}
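
The flag manipulation used throughout do_debug() comes from the generic thread_info helpers in <linux/thread_info.h>, which operate atomically on ti->flags. A short sketch of the pattern (the task pointer tsk is hypothetical):

	struct thread_info *ti = task_thread_info(tsk);

	set_ti_thread_flag(ti, TIF_SINGLE_STEP);	/* request a step */
	if (test_and_clear_ti_thread_flag(ti, TIF_BREAKPOINT)) {
		/* a one-shot breakpoint request was pending and has now
		 * been consumed, exactly as in do_debug() above */
	}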
Example #8
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
Example #9
/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
 * case of CV_TIMEDWAIT, until the specified timeout occurs.
 * - NOTE: on Linux, there are circumstances in which TASK_INTERRUPTIBLE
 *   can wake up, even if all signals are blocked
 * - TODO: handle signals correctly by passing an indication back to the
 *   caller that the wait has been interrupted and the stack should be cleaned
 *   up preparatory to signal delivery
 */
int
afs_cv_wait(afs_kcondvar_t * cv, afs_kmutex_t * l, int sigok)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    sigset_t saved_set;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif
    sigemptyset(&saved_set);
    seq = cv->seq;
    
    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
	AFS_GUNLOCK();
    MUTEX_EXIT(l);

    if (!sigok) {
	SIG_LOCK(current);
	saved_set = current->blocked;
	sigfillset(&current->blocked);
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    while(seq == cv->seq) {
	schedule();
#ifdef AFS_LINUX26_ENV
#ifdef CONFIG_PM
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
	    test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
	    test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
#endif
#endif
	/* re-arm before re-checking cv->seq, so a wakeup that fires
	 * between the check and schedule() is not lost and a wakeup
	 * that does not bump cv->seq cannot busy-spin in TASK_RUNNING */
	set_current_state(TASK_INTERRUPTIBLE);
    }

    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (!sigok) {
	SIG_LOCK(current);
	current->blocked = saved_set;
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    if (isAFSGlocked)
	AFS_GLOCK();
    MUTEX_ENTER(l);

    return (sigok && signal_pending(current)) ? EINTR : 0;
}
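
A hedged caller sketch, assuming OpenAFS's usual CV_WAIT(cv, m) macro expands to afs_cv_wait(cv, m, 0) (CV_WAIT_SIG passing sigok = 1); the queue, its lock, and the predicate are hypothetical:

    MUTEX_ENTER(&q_lock);
    while (queue_is_empty(&q))		/* hypothetical predicate */
	CV_WAIT(&q_cv, &q_lock);	/* drops and retakes q_lock inside */
    /* consume an item under q_lock ... */
    MUTEX_EXIT(&q_lock);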