Example #1
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe *rt_sf;
	struct sigcontext_struct sigctx;
	struct sigregs *sr;
	int ret;
	elf_gregset_t saved_regs;  /* an array of ELF_NGREG unsigned longs */
	sigset_t set;
	stack_t st;
	unsigned long prevsp;

	rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
	    || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set))
	    || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st)))
		goto badframe;
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	rt_sf++;			/* Look at next rt_sigframe */
	if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) {
		/* Last stacked signal - restore registers -
		 * sigctx is initialized to point to the 
		 * preamble frame (where registers are stored) 
		 * see handle_signal()
		 */
		sr = (struct sigregs *) sigctx.regs;
		if (regs->msr & MSR_FP )
			giveup_fpu(current);
		if (copy_from_user(saved_regs, &sr->gp_regs,
				   sizeof(sr->gp_regs)))
			goto badframe;
		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
		saved_regs[PT_SOFTE] = regs->softe;
		memcpy(regs, saved_regs, GP_REGS_SIZE);
		if (copy_from_user(current->thread.fpr, &sr->fp_regs,
				   sizeof(sr->fp_regs)))
			goto badframe;
		/* Restore the signal stack settings in the
		   current task structure.  */
		sys_sigaltstack(&st, NULL);

		ret = regs->result;
	} else {
		/* More signals to go */
		/* Set up registers for next signal handler */
		regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE;
		if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)))
			goto badframe;
		sr = (struct sigregs *) sigctx.regs;
		regs->gpr[3] = ret = sigctx.signal;
		/* Get the siginfo   */
		get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo);
		/* Get the ucontext */
		get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc);
		regs->gpr[6] = (unsigned long) rt_sf;

		regs->link = (unsigned long) &sr->tramp;
		regs->nip = sigctx.handler;
		if (get_user(prevsp, &sr->gp_regs[PT_R1])
		    || put_user(prevsp, (unsigned long *) regs->gpr[1]))
			goto badframe;
	}
	return ret;

badframe:
	do_exit(SIGSEGV);
}
Example #2
int ckpt_restore_signals(ckpt_desc_t desc)
{
    int i;
    int ret;
    stack_t sigstack;
    sigset_t sigblocked;

    log_restore_signals("restoring sigstack ...");
    if (ckpt_read(desc, &sigstack, sizeof(stack_t)) != sizeof(stack_t)) {
        log_err("failed to get sigstack");
        return -EIO;
    }

    ret = compat_sigaltstack(current, &sigstack, NULL, 0);
    if (ret) {
        log_err("failed to restore sigstack (ret=%d)", ret);
        return ret;
    }

    log_restore_signals("restoring sigblocked ...");
    if (ckpt_read(desc, &sigblocked, sizeof(sigset_t)) != sizeof(sigset_t)) {
        log_err("failed to restore sigstack");
        return -EIO;
    }

    sigdelsetmask(&sigblocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = sigblocked;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    log_restore_signals("restoring pending ...");
    ret = ckpt_restore_sigpending(&current->pending, 0, desc);
    if (ret) {
        log_err("failed to restore pending");
        return ret;
    }

    ret = ckpt_restore_sigpending(&current->signal->shared_pending, 1, desc);
    if (ret) {
        log_err("failed to restore shared_pending");
        return ret;
    }

    log_restore_signals("restoring sigaction ...");
    for (i = 0; i < _NSIG; i++) {
        struct k_sigaction sigaction;

        if (ckpt_read(desc, &sigaction, sizeof(struct k_sigaction)) != sizeof(struct k_sigaction)) {
            log_err("failed to get sigaction");
            return -EIO;
        }

        if ((i != SIGKILL - 1) && (i != SIGSTOP - 1)) {
            ret = do_sigaction(i + 1, &sigaction, 0);
            if (ret) {
                log_err("failed to restore sigaction (ret=%d)", ret);
                return ret;
            }
        }
    }
    log_restore_pos(desc);
    return 0;
}
Example #3
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_lock(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cgroup;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	if (unlikely(clone_flags & CLONE_NEWPID))
		pid_ns_release_proc(p->nsproxy->pid_ns);
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
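
The EINVAL checks at the top of copy_process() are observable from user space: clone(2) fails for the same flag combinations. A minimal userspace sketch (assumes Linux with glibc's clone() wrapper; child_fn and the stack handling are illustrative only, not part of the kernel source above):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int child_fn(void *arg)
{
	(void)arg;
	return 0;
}

int main(void)
{
	const size_t sz = 64 * 1024;
	char *stack = malloc(sz);

	if (!stack)
		return 1;

	/* CLONE_THREAD without CLONE_SIGHAND: copy_process() returns -EINVAL */
	if (clone(child_fn, stack + sz, CLONE_THREAD, NULL) == -1)
		printf("CLONE_THREAD alone: %s\n", strerror(errno));

	/* CLONE_SIGHAND without CLONE_VM: likewise rejected */
	if (clone(child_fn, stack + sz, CLONE_SIGHAND, NULL) == -1)
		printf("CLONE_SIGHAND alone: %s\n", strerror(errno));

	free(stack);	/* safe: neither clone() above created a child */
	return 0;
}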
Example #4
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init, even with SIGKILL, even by
 * mistake.
 */
int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	unsigned long frame, newsp;
	int signr, ret;

	if (!oldset)
		oldset = &current->blocked;

	newsp = frame = 0;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	if (TRAP(regs) == 0x0C00		/* System Call! */
	    && regs->ccr & 0x10000000		/* error signalled */
	    && ((ret = regs->gpr[3]) == ERESTARTSYS
		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
		|| ret == ERESTART_RESTARTBLOCK)) {

		if (signr > 0
		    && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
			|| (ret == ERESTARTSYS
			    && !(ka.sa.sa_flags & SA_RESTART)))) {
			/* make the system call return an EINTR error */
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			/* note that the cr0.SO bit is already set */
		} else {
			regs->nip -= 4;	/* Back up & retry system call */
			regs->result = 0;
			regs->trap = 0;
			if (ret == ERESTART_RESTARTBLOCK)
				regs->gpr[0] = __NR_restart_syscall;
			else
				regs->gpr[3] = regs->orig_gpr3;
		}
	}

	if (signr == 0)
		return 0;		/* no signals delivered */

	if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
	    && !on_sig_stack(regs->gpr[1]))
		newsp = current->sas_ss_sp + current->sas_ss_size;
	else
		newsp = regs->gpr[1];
	newsp &= ~0xfUL;

	/* Whee!  Actually deliver the signal.  */
	if (ka.sa.sa_flags & SA_SIGINFO)
		handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
	else
		handle_signal(signr, &ka, &info, oldset, regs, newsp);

	if (!(ka.sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
		sigaddset(&current->blocked, signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return 1;
}
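
What user space sees of the restart logic above: without SA_RESTART, a slow syscall interrupted by a handled signal fails with EINTR; with SA_RESTART, the kernel backs up the PC and retries it, as in the else branch. A small userspace illustration (a sketch, not kernel code; it reads from stdin for up to one second):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* delivery alone is enough to interrupt read() */
}

int main(void)
{
	struct sigaction sa;
	char buf[1];

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* sa_flags == 0: no SA_RESTART */
	sigaction(SIGALRM, &sa, NULL);

	alarm(1);
	if (read(STDIN_FILENO, buf, 1) == -1 && errno == EINTR)
		printf("read interrupted: EINTR\n");
	/* with sa.sa_flags = SA_RESTART the read would be retried instead */
	return 0;
}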
Example #5
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc,  &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __get_user(rwin_save, &sf->rwin_save);
	if (rwin_save)
		err |= restore_rwin_state(rwin_save);

	/* This is pretty much atomic, no amount of locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
			        (_NSIG_WORDS-1) * sizeof(unsigned int));
			   
	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
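
The sigdelsetmask(&set, ~_BLOCKABLE) idiom above is what keeps a restored mask from ever blocking SIGKILL or SIGSTOP. For reference, the definition it relies on in kernels of this vintage (copied here as a sketch, not part of the function above):

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* hence:
 *   sigdelsetmask(&set, ~_BLOCKABLE)
 * is equivalent to
 *   sigdelsetmask(&set, sigmask(SIGKILL) | sigmask(SIGSTOP))
 * -- the spelled-out form used in ckpt_restore_signals() in Example #2.
 */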
Example #6
#ifdef __linux__
int
#else
void
#endif
sis1100_irq_thread(void* data)
{
    struct sis1100_softc* sc=(struct sis1100_softc*)data;
    enum handlercomm command;
    DECLARE_SPINLOCKFLAGS(flags);

#ifdef __linux__
#if LINUX_VERSION_CODE < 0x20600
    daemonize();
    snprintf(current->comm, sizeof(current->comm), "sis1100_%02d", sc->unit);
    SPIN_LOCK_IRQSAVE(current->sigmask_lock, flags);
    sigemptyset(&current->blocked);
    recalc_sigpending(current);
    SPIN_UNLOCK_IRQRESTORE(current->sigmask_lock, flags);
#endif
#endif /*__linux__*/

    while (1) {
#ifdef __NetBSD__
        tsleep(&sc->handler_wait, PCATCH, "thread_vmeirq", 0);
#elif __linux__
        /* prepare to sleep */
	__set_current_state(TASK_INTERRUPTIBLE);
        /* don't sleep if command!=0 */
        if (sc->handlercommand.command)
	    __set_current_state(TASK_RUNNING);
        else
            schedule();
#endif

        if (kthread_should_stop())
            return 0;

        SPIN_LOCK_IRQSAVE(sc->handlercommand.lock, flags);
        command=sc->handlercommand.command;
        sc->handlercommand.command=0;
        SPIN_UNLOCK_IRQRESTORE(sc->handlercommand.lock, flags);

#if 0
        pERROR(sc, "irq_thread: command=0x%x", command);
#endif

        _sis1100_irq_thread(sc, command);

#ifdef __linux__
	if (signal_pending (current)) {
	    SPIN_LOCK_IRQSAVE(current->SIGMASK_LOCK, flags);
	    flush_signals(current);
	    SPIN_UNLOCK_IRQRESTORE(current->SIGMASK_LOCK, flags);
	}
#endif /*__linux__*/

    }
#ifdef __linux__
    return 0;
#endif
}
Example #7
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if (get_thread_wsaved()					||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp  = &ucp->uc_mcontext.mc_gregs;
	err  = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #8
/**
 * request_module - try to load a kernel module
 * @module_name: Name of module
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int request_module(const char * module_name)
{
    pid_t pid;
    int waitpid_result;
    sigset_t tmpsig;
    int i;
    static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
    static int kmod_loop_msg;

    /* Don't allow request_module() before the root fs is mounted!  */
    if ( ! current->fs->root )
    {
        printk(KERN_ERR "request_module[%s]: Root fs not mounted\n",
               module_name);
        return -EPERM;
    }

    /* If modprobe needs a service that is in a module, we get a recursive
     * loop.  Limit the number of running kmod threads to max_threads/2 or
     * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
     * would be to run the parents of this process, counting how many times
     * kmod was invoked.  That would mean accessing the internals of the
     * process tables to get the command line, proc_pid_cmdline is static
     * and it is not worth changing the proc code just to handle this case.
     * KAO.
     */
    i = max_threads/2;
    if (i > MAX_KMOD_CONCURRENT)
        i = MAX_KMOD_CONCURRENT;
    atomic_inc(&kmod_concurrent);
    if (atomic_read(&kmod_concurrent) > i)
    {
        if (kmod_loop_msg++ < 5)
            printk(KERN_ERR
                   "kmod: runaway modprobe loop assumed and stopped\n");
        atomic_dec(&kmod_concurrent);
        return -ENOMEM;
    }

    pid = kernel_thread(exec_modprobe, (void*) module_name, 0);
    if (pid < 0)
    {
        printk(KERN_ERR "request_module[%s]: fork failed, errno %d\n", module_name, -pid);
        atomic_dec(&kmod_concurrent);
        return pid;
    }

    /* Block everything but SIGKILL/SIGSTOP */
    spin_lock_irq(&current->sigmask_lock);
    tmpsig = current->blocked;
    siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
    recalc_sigpending(current);
    spin_unlock_irq(&current->sigmask_lock);

    waitpid_result = waitpid(pid, NULL, __WCLONE);
    atomic_dec(&kmod_concurrent);

    /* Allow signals again.. */
    spin_lock_irq(&current->sigmask_lock);
    current->blocked = tmpsig;
    recalc_sigpending(current);
    spin_unlock_irq(&current->sigmask_lock);

    if (waitpid_result != pid)
    {
        printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
               module_name, pid, -waitpid_result);
    }
    return 0;
}
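
Per the comment above, a zero return only means the usermode loader ran, so callers re-probe for the service instead of trusting the return value. A hedged caller sketch (struct foo_ops and foo_lookup() are hypothetical placeholders, not real kernel APIs):

static struct foo_ops *get_foo_ops(void)
{
	struct foo_ops *ops = foo_lookup("foo");	/* hypothetical lookup */

	if (!ops) {
		/* ignore the return value: the module may have loaded
		 * and already unloaded again, as the comment warns */
		request_module("foo");
		ops = foo_lookup("foo");	/* re-check, don't assume */
	}
	return ops;	/* may still be NULL; caller must handle that */
}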
Example #9
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		struct ncp_reply_header* reply_buf, int max_reply_size)
{
	struct file *file;
	struct inode *inode;
	struct socket *sock;
	mm_segment_t fs;
	int result;
	char *start = server->packet;
	poll_table wait_table;
	struct poll_table_entry entry;
	int init_timeout, max_timeout;
	int timeout;
	int retrans;
	int major_timeout_seen;
	int acknowledge_seen;
	int n;
	sigset_t old_set;
	unsigned long mask, flags;

	/* We have to check the result, so store the complete header */
	struct ncp_request_header request =
	*((struct ncp_request_header *) (server->packet));

	struct ncp_reply_header reply;

	file = server->ncp_filp;
	inode = file->f_dentry->d_inode;
	sock = &inode->u.socket_i;
	/* N.B. this isn't needed ... check socket type? */
	if (!sock) {
		printk(KERN_ERR "ncp_rpc_call: socki_lookup failed\n");
		return -EBADF;
	}

	init_timeout = server->m.time_out;
	max_timeout = NCP_MAX_RPC_TIMEOUT;
	retrans = server->m.retry_count;
	major_timeout_seen = 0;
	acknowledge_seen = 0;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	old_set = current->blocked;
	mask = sigmask(SIGKILL) | sigmask(SIGSTOP);
	if (server->m.flags & NCP_MOUNT_INTR) {
		/* FIXME: This doesn't seem right at all.  So, like,
		   we can't handle SIGINT and get whatever to stop?
		   What if we've blocked it ourselves?  What about
		   alarms?  Why, in fact, are we mucking with the
		   sigmask at all? -- r~ */
		if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
			mask |= sigmask(SIGINT);
		if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
			mask |= sigmask(SIGQUIT);
	}
	siginitsetinv(&current->blocked, mask);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	fs = get_fs();
	set_fs(get_ds());
	for (n = 0, timeout = init_timeout;; n++, timeout <<= 1) {
		/*
		DDPRINTK(KERN_DEBUG "ncpfs: %08lX:%02X%02X%02X%02X%02X%02X:%04X\n",
			 htonl(server->m.serv_addr.sipx_network),
			 server->m.serv_addr.sipx_node[0],
			 server->m.serv_addr.sipx_node[1],
			 server->m.serv_addr.sipx_node[2],
			 server->m.serv_addr.sipx_node[3],
			 server->m.serv_addr.sipx_node[4],
			 server->m.serv_addr.sipx_node[5],
			 ntohs(server->m.serv_addr.sipx_port));
		*/
		DDPRINTK(KERN_DEBUG "ncpfs: req.typ: %04X, con: %d, "
			 "seq: %d",
			 request.type,
			 (request.conn_high << 8) + request.conn_low,
			 request.sequence);
		DDPRINTK(KERN_DEBUG " func: %d\n",
			 request.function);

		result = _send(sock, (void *) start, size);
		if (result < 0) {
			printk(KERN_ERR "ncp_rpc_call: send error = %d\n", result);
			break;
		}
	      re_select:
		wait_table.nr = 0;
		wait_table.entry = &entry;
		current->state = TASK_INTERRUPTIBLE;
		if (!(file->f_op->poll(file, &wait_table) & POLLIN)) {
			int timed_out;
			if (timeout > max_timeout) {
				/* JEJB/JSP 2/7/94
				 * This is useful to see if the system is
				 * hanging */
				if (acknowledge_seen == 0) {
					printk(KERN_WARNING "NCP max timeout\n");
				}
				timeout = max_timeout;
			}
			timed_out = !schedule_timeout(timeout);
			remove_wait_queue(entry.wait_address, &entry.wait);
			fput(file);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				result = -ERESTARTSYS;
				break;
			}
			if (timed_out) {
				if (n < retrans)
					continue;
				if (server->m.flags & NCP_MOUNT_SOFT) {
					printk(KERN_WARNING "NCP server not responding\n");
					result = -EIO;
					break;
				}
				n = 0;
				timeout = init_timeout;
				init_timeout <<= 1;
				if (!major_timeout_seen) {
					printk(KERN_WARNING "NCP server not responding\n");
				}
				major_timeout_seen = 1;
				continue;
			}
		} else if (wait_table.nr) {
			remove_wait_queue(entry.wait_address, &entry.wait);
			fput(file);
		}
		current->state = TASK_RUNNING;

		/* Get the header from the next packet using a peek, so keep it
		 * on the recv queue.  If it is wrong, it will be some reply
		 * we no longer need, so discard it */
		result = _recv(sock, (void *) &reply, sizeof(reply),
			       MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK(KERN_DEBUG "ncp_rpc_call: bad select ready\n");
				goto re_select;
			}
			if (result == -ECONNREFUSED) {
				DPRINTK(KERN_WARNING "ncp_rpc_call: server playing coy\n");
				goto re_select;
			}
			if (result != -ERESTARTSYS) {
				printk(KERN_ERR "ncp_rpc_call: recv error = %d\n",
				       -result);
			}
			break;
		}
		if ((result == sizeof(reply))
		    && (reply.type == NCP_POSITIVE_ACK)) {
			/* Throw away the packet */
			DPRINTK(KERN_DEBUG "ncp_rpc_call: got positive acknowledge\n");
			_recv(sock, (void *) &reply, sizeof(reply),
			      MSG_DONTWAIT);
			n = 0;
			timeout = max_timeout;
			acknowledge_seen = 1;
			goto re_select;
		}
		DDPRINTK(KERN_DEBUG "ncpfs: rep.typ: %04X, con: %d, tsk: %d,"
			 "seq: %d\n",
			 reply.type,
			 (reply.conn_high << 8) + reply.conn_low,
			 reply.task,
			 reply.sequence);

		if ((result >= sizeof(reply))
		    && (reply.type == NCP_REPLY)
		    && ((request.type == NCP_ALLOC_SLOT_REQUEST)
			|| ((reply.sequence == request.sequence)
			    && (reply.conn_low == request.conn_low)
/* seem to get wrong task from NW311 && (reply.task      == request.task) */
			    && (reply.conn_high == request.conn_high)))) {
			if (major_timeout_seen)
				printk(KERN_NOTICE "NCP server OK\n");
			break;
		}
		/* JEJB/JSP 2/7/94
		 * we have xid mismatch, so discard the packet and start
		 * again.  What a hack! but I can't call recvfrom with
		 * a null buffer yet. */
		_recv(sock, (void *) &reply, sizeof(reply), MSG_DONTWAIT);

		DPRINTK(KERN_WARNING "ncp_rpc_call: reply mismatch\n");
		goto re_select;
	}
	/* 
	 * we have the correct reply, so read into the correct place and
	 * return it
	 */
	result = _recv(sock, (void *)reply_buf, max_reply_size, MSG_DONTWAIT);
	if (result < 0) {
		printk(KERN_WARNING "NCP: notice message: result=%d\n", result);
	} else if (result < sizeof(struct ncp_reply_header)) {
		printk(KERN_ERR "NCP: just caught a too small read memory size..., "
		       "email to NET channel\n");
		printk(KERN_ERR "NCP: result=%d\n", result);
		result = -EIO;
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = old_set;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
	
	set_fs(fs);
	return result;
}
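
Both this function and request_module() in Example #8 build the new mask with siginitsetinv(), which initializes a set to the complement of the given mask: "block everything except these". As an isolated sketch of the idiom, using the same 2.4-era locking shown in those examples:

static void block_all_but_kill_stop(void)
{
	sigset_t blocked;

	/* a signal is a member iff it is NOT in the mask argument */
	siginitsetinv(&blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));

	spin_lock_irq(&current->sigmask_lock);
	current->blocked = blocked;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
}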
Example #10
int affixd_thread(void *startup)
{
	int			ret = 0;
	struct pollfd		fds[2];
	__u8			msg[HCI_MAX_MSG_SIZE];
	__u8			event[HCI_MAX_EVENT_SIZE];

	DBFENTER;
	daemonize("affixd");
	allow_signal(SIGTERM);
	allow_signal(SIGUSR1);

	/*  Open HCI manager */
	fds[0].fd = hci_open_mgr();
	if (fds[0].fd < 0) {
		ret = fds[0].fd;
		fds[1].fd = -1;
		goto exit;
	}
	fds[0].events = POLLIN;
	
	fds[1].fd = hci_open_event();
	if (fds[1].fd < 0) {
		ret = fds[1].fd;
		goto exit;
	}
	fds[1].events = POLLIN;

	up(&exit_sema);
	
	hci_event_mask(fds[1].fd, ALL_EVENTS_MASK &
			~(COMMAND_COMPLETE_MASK | COMMAND_STATUS_MASK |
			  NUMBER_OF_COMPLETE_PACKETS_MASK | FLUSH_OCCURRED_MASK));
	module_put(THIS_MODULE);
	module_put(THIS_MODULE);
	module_put(THIS_MODULE);
	module_put(THIS_MODULE);
	for (;;) {
		ret = hci_poll(fds, 2, -1);
		if (ret > 0) {/* we have something */
			if ((fds[0].revents | fds[1].revents) & (POLLERR | POLLHUP | POLLNVAL))
				break;
			if (fds[1].revents) {
				if (fds[1].revents & POLLIN) { /* HCI event */
					int	devnum;
					hci_recv_event_any(fds[1].fd, &devnum, event, sizeof(event));
					event_handler((void*)event, devnum);
				}
			}
			if (fds[0].revents) {
				if (fds[0].revents & POLLIN) {/* MSG */
					btsys_recv(fds[0].fd, msg, sizeof(msg), 0);
					message_handler((void*)(msg+1));
				}
			}
		}
		if (signal_pending(current)) {
			DBPRT("Got a signal!!!\n");
//			if (sigismember(&current->pending.signal, SIGUSR1)) {
//				sigdelset(&current->pending.signal, SIGUSR1);
//			}
			flush_signals(current);	// flush pending signals
			spin_lock_irq(&current->sighand->siglock);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);
			if (mgr_exit)
				break;
		}
	}
	//try_module_get(THIS_MODULE);
	//try_module_get(THIS_MODULE);
	//try_module_get(THIS_MODULE);
	//try_module_get(THIS_MODULE);
exit:
	if (fds[0].fd >= 0)
		hci_close(fds[0].fd);
	if (fds[1].fd >= 0)
		hci_close(fds[1].fd);
	mgr_exit = ret;
	up(&exit_sema);
	DBFEXIT;
	return ret;
}
Example #11
asmlinkage void
irix_sigreturn(struct pt_regs *regs)
{
	struct sigctx_irix5 __user *context, *magic;
	unsigned long umask, mask;
	u64 *fregs;
	u32 usedfp;
	int error, sig, i, base = 0;
	sigset_t blocked;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (regs->regs[2] == 1000)
		base = 1;

	context = (struct sigctx_irix5 __user *) regs->regs[base + 4];
	magic = (struct sigctx_irix5 __user *) regs->regs[base + 5];
	sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
	printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
	       current->comm, current->pid, context, magic, sig);
#endif
	if (!context)
		context = magic;
	if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
		goto badframe;

#ifdef DEBUG_SIG
	dump_irix5_sigctx(context);
#endif

	error = __get_user(regs->cp0_epc, &context->pc);
	error |= __get_user(umask, &context->rmask);

	mask = 2;
	for (i = 1; i < 32; i++, mask <<= 1) {
		if (umask & mask)
			error |= __get_user(regs->regs[i], &context->regs[i]);
	}
	error |= __get_user(regs->hi, &context->hi);
	error |= __get_user(regs->lo, &context->lo);

	error |= __get_user(usedfp, &context->usedfp);
	if ((umask & 1) && usedfp) {
		fregs = (u64 *) &current->thread.fpu;

		for(i = 0; i < 32; i++)
			error |= __get_user(fregs[i], &context->fpregs[i]);
		error |= __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
	}

	/* XXX do sigstack crapola here... XXX */

	error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0;

	if (error)
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Don't let your children do this ...
	 */
	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
		do_syscall_trace(regs, 1);
	__asm__ __volatile__(
		"move\t$29,%0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
		/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
Example #12
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;


	daemonize("%sd", tr->name);


	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call 
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

#ifdef CONFIG_MOT_WFN473
        set_user_nice(current, -20);
#endif

	spin_lock_irq(rq->queue_lock);
		
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
Example #13
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
	      unsigned long *newspp, unsigned long frame)
{
	struct sigcontext_struct *sc;
        struct rt_sigframe *rt_sf;

	if (regs->trap == 0x0C00 /* System Call! */
	    && ((int)regs->result == -ERESTARTNOHAND ||
		((int)regs->result == -ERESTARTSYS &&
		 !(ka->sa.sa_flags & SA_RESTART))))
		regs->result = -EINTR;
        /* Set up Signal Frame */
     
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* Put a Real Time Context onto stack */
		*newspp -= sizeof(*rt_sf);
		rt_sf = (struct rt_sigframe *) *newspp;
		if (verify_area(VERIFY_WRITE, rt_sf, sizeof(*rt_sf)))
			goto badframe;
                

		if (__put_user((unsigned long) ka->sa.sa_handler, &rt_sf->uc.uc_mcontext.handler)
		    || __put_user(&rt_sf->info, &rt_sf->pinfo)
		    || __put_user(&rt_sf->uc, &rt_sf->puc)
		    /* Put the siginfo */
		    || __copy_to_user(&rt_sf->info, info, sizeof(*info))
		    /* Create the ucontext */
		    || __put_user(0, &rt_sf->uc.uc_flags)
		    || __put_user(0, &rt_sf->uc.uc_link)
		    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
		    || __put_user(sas_ss_flags(regs->gpr[1]), 
				  &rt_sf->uc.uc_stack.ss_flags)
		    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
		    || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset))
		    /* mcontext.regs points to preamble register frame */
		    || __put_user((struct pt_regs *)frame, &rt_sf->uc.uc_mcontext.regs)
		    || __put_user(sig, &rt_sf->uc.uc_mcontext.signal))
			goto badframe;
               
	} else {
	        /* Put another sigcontext on the stack */
                *newspp -= sizeof(*sc);
        	sc = (struct sigcontext_struct *) *newspp;
        	if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
	        	goto badframe;

        	if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
	           || __put_user(oldset->sig[0], &sc->oldmask)
#if _NSIG_WORDS > 1
	           || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	           || __put_user((struct pt_regs *)frame, &sc->regs)
	           || __put_user(sig, &sc->signal))
	        	goto badframe;
	}

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sigmask_lock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
	}
	return;

badframe:
#if DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%lx newsp=%lx\n",
	       regs, frame, *newspp);
	printk("sc=%p sig=%d ka=%p info=%p oldset=%p\n", sc, sig, ka, info, oldset);
#endif
	do_exit(SIGSEGV);
}
Example #14
/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7, unsigned long r8,
		   struct pt_regs *regs)
{
	struct sigcontext_struct *sc, sigctx;
	struct sigregs *sr;
	long ret;
	elf_gregset_t saved_regs;  /* an array of ELF_NGREG unsigned longs */
	sigset_t set;
	unsigned long prevsp;

        sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

	set.sig[0] = sigctx.oldmask;
#if _NSIG_WORDS > 1
	set.sig[1] = sigctx._unused[3];
#endif
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	sc++;			/* Look at next sigcontext */
	if (sc == (struct sigcontext_struct *)(sigctx.regs)) {
		/* Last stacked signal - restore registers */
		sr = (struct sigregs *) sigctx.regs;
		if (regs->msr & MSR_FP )
			giveup_fpu(current);
		if (copy_from_user(saved_regs, &sr->gp_regs,
				   sizeof(sr->gp_regs)))
			goto badframe;
		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
		saved_regs[PT_SOFTE] = regs->softe;
		memcpy(regs, saved_regs, GP_REGS_SIZE);

		if (copy_from_user(current->thread.fpr, &sr->fp_regs,
				   sizeof(sr->fp_regs)))
			goto badframe;

		ret = regs->result;

	} else {
		/* More signals to go */
		regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE;
		if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
			goto badframe;
		sr = (struct sigregs *) sigctx.regs;
		regs->gpr[3] = ret = sigctx.signal;
		regs->gpr[4] = (unsigned long) sc;
		regs->link = (unsigned long) &sr->tramp;
		regs->nip = sigctx.handler;

		if (get_user(prevsp, &sr->gp_regs[PT_R1])
		    || put_user(prevsp, (unsigned long *) regs->gpr[1]))
			goto badframe;
	}
	return ret;

badframe:
	do_exit(SIGSEGV);
}	
Example #15
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group or signal handlers or
	 * parent with the forking task.
	 */
	if (clone_flags & CLONE_SIGHAND) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	/*
	 * Make it visible to the rest of the system, but dont wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	*/
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);
#ifdef CONFIG_RKP_KDP
	if(rkp_cred_enable)
		rkp_assign_pgd(p);
#endif/*CONFIG_RKP_KDP*/
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Example #16
static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
		sigset_t *oldset, struct pt_regs *regs)
{
	int ret;

#ifdef DEBUG_SIG
	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
		current->pid, sig,
		regs->rip, regs->rsp, regs);
#endif

	/* Are we from a system call? */
	if ((long)regs->orig_rax >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->rax) {
			case -ERESTART_RESTARTBLOCK:
			case -ERESTARTNOHAND:
				regs->rax = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->rax = -EINTR;
					break;
				}
				/* fallthrough */
			case -ERESTARTNOINTR:
				regs->rax = regs->orig_rax;
				regs->rip -= 2;
				break;
		}
	}

	/*
	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
	 * flag so that register information in the sigcontext is
	 * correct.
	 */
	if (unlikely(regs->eflags & TF_MASK)) {
		if (likely(current->ptrace & PT_DTRACE)) {
			current->ptrace &= ~PT_DTRACE;
			regs->eflags &= ~TF_MASK;
		}
	}

#ifdef CONFIG_IA32_EMULATION
	if (test_thread_flag(TIF_IA32)) {
		if (ka->sa.sa_flags & SA_SIGINFO)
			ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
		else
			ret = ia32_setup_frame(sig, ka, oldset, regs);
	} else 
#endif
	ret = setup_rt_frame(sig, ka, info, oldset, regs);

	if (ret == 0) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return ret;
}
Example #17
static int cachemiss_thread (void *data)
{
	tux_req_t *req;
	struct k_sigaction *ka;
	DECLARE_WAITQUEUE(wait, current);
	iothread_t *iot = data;
	int nr = iot->ti->cpu, wake_up;

	Dprintk("iot %p/%p got started.\n", iot, current);
	drop_permissions();

	spin_lock(&iot->async_lock);
	iot->threads++;
	sprintf(current->comm, "async IO %d/%d", nr, iot->threads);


	spin_lock_irq(&current->sighand->siglock);
	ka = current->sighand->action + SIGCHLD-1;
	ka->sa.sa_handler = SIG_IGN;
	siginitsetinv(&current->blocked, sigmask(SIGCHLD));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_unlock(&iot->async_lock);
#ifdef CONFIG_SMP
	{
		cpumask_t mask;

		if (cpu_isset(nr, cpu_online_map)) {
			cpus_clear(mask);
			cpu_set(nr, mask);
			set_cpus_allowed(current, mask);
		}

	}
#endif

	add_wait_queue_exclusive(&iot->async_sleep, &wait);

	for (;;) {
		while (!list_empty(&iot->async_queue) &&
				(req = get_cachemiss(iot))) {

			if (!req->atom_idx) {
				add_tux_atom(req, flush_request);
				add_req_to_workqueue(req);
				continue;
			}
			tux_schedule_atom(req, 1);
			if (signal_pending(current))
				flush_all_signals();
		}
		if (signal_pending(current))
			flush_all_signals();
		if (!list_empty(&iot->async_queue))
			continue;
		if (iot->shutdown) {
			Dprintk("iot %p/%p got shutdown!\n", iot, current);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&iot->async_queue)) {
			Dprintk("iot %p/%p going to sleep.\n", iot, current);
			schedule();
			Dprintk("iot %p/%p got woken up.\n", iot, current);
		}
		__set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&iot->async_sleep, &wait);

	wake_up = 0;
	spin_lock(&iot->async_lock);
	if (!--iot->threads)
		wake_up = 1;
	spin_unlock(&iot->async_lock);
	Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
	if (wake_up) {
		Dprintk("iot %p/%p waking up master.\n", iot, current);
		wake_up(&iot->wait_shutdown);
	}

	return 0;
}
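The mask setup in this kernel thread (SIG_IGN for SIGCHLD, everything else blocked via siginitsetinv()) has a common userspace counterpart: a worker thread that blocks all signals and then fetches the one it services synchronously. A sketch using POSIX threads; SIGUSR1 and the thread structure are assumptions for illustration:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Worker that parks with everything blocked and services exactly one
 * signal synchronously, roughly what the iothread above arranges. */
static void *worker(void *arg)
{
	sigset_t all, one;
	int sig;

	(void)arg;
	sigfillset(&all);
	pthread_sigmask(SIG_SETMASK, &all, NULL);	/* block everything */

	sigemptyset(&one);
	sigaddset(&one, SIGUSR1);
	sigwait(&one, &sig);				/* wait for SIGUSR1 */
	printf("worker received signal %d\n", sig);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	sleep(1);			/* crude: let the worker set its mask */
	pthread_kill(tid, SIGUSR1);
	pthread_join(tid, NULL);
	return 0;
}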
Example #18
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
{
	va_list ap;
	int tag, err;
	struct p9_req_t *req;
	unsigned long flags;
	int sigpending;
	int flushed = 0;

	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);

	if (c->status != Connected)
		return ERR_PTR(-EIO);

	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	} else
		sigpending = 0;

	tag = P9_NOTAG;
	if (type != P9_TVERSION) {
		tag = p9_idpool_get(c->tagpool);
		if (tag < 0)
			return ERR_PTR(-ENOMEM);
	}

	req = p9_tag_alloc(c, tag);
	if (IS_ERR(req))
		return req;

	/* marshall the data */
	p9pdu_prepare(req->tc, tag, type);
	va_start(ap, fmt);
	err = p9pdu_vwritef(req->tc, c->dotu, fmt, ap);
	va_end(ap);
	p9pdu_finalize(req->tc);

	err = c->trans_mod->request(c, req);
	if (err < 0) {
		c->status = Disconnected;
		goto reterr;
	}

	/* if it was a flush we just transmitted, return our tag */
	if (type == P9_TFLUSH)
		return req;
again:
	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
	err = wait_event_interruptible(*req->wq,
						req->status >= REQ_STATUS_RCVD);
	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
						req->wq, tag, err, flushed);

	if (req->status == REQ_STATUS_ERROR) {
		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
		err = req->t_err;
	} else if (err == -ERESTARTSYS && flushed) {
		P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
		goto again;
	} else if (req->status == REQ_STATUS_FLSHD) {
		P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
		err = -ERESTARTSYS;
	}

	if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
		spin_lock_irqsave(&c->lock, flags);
		if (req->status == REQ_STATUS_SENT)
			req->status = REQ_STATUS_FLSH;
		spin_unlock_irqrestore(&c->lock, flags);
		sigpending = 1;
		flushed = 1;
		clear_thread_flag(TIF_SIGPENDING);

		if (c->trans_mod->cancel(c, req)) {
			err = p9_client_flush(c, req);
			if (err == 0)
				goto again;
		}
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (err < 0)
		goto reterr;

	err = p9_check_errors(c, req);
	if (!err) {
		P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
		return req;
	}

reterr:
	P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
									err);
	p9_free_req(c, req);
	return ERR_PTR(err);
}
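The wait/flush loop above re-enters the wait after a flushed request instead of surfacing -ERESTARTSYS to the caller. The everyday userspace relative of that pattern is the EINTR retry loop, sketched here (read() stands in for any slow call; this is not part of the 9P client itself):

#include <errno.h>
#include <unistd.h>

/* Retry a slow call that a signal interrupted before completion: the
 * userspace counterpart of re-entering the wait after a flush. */
static ssize_t read_eintr_retry(int fd, void *buf, size_t n)
{
	ssize_t r;

	do {
		r = read(fd, buf, n);
	} while (r < 0 && errno == EINTR);
	return r;
}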
Example #19
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	
	if (err)
		goto segv;
		
	regs->tpc = tpc;
	regs->tnpc = tnpc;
	
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
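The stack_t restored through do_sigaltstack() here is the same object userspace registers with sigaltstack(2). A minimal sketch of setting up an alternate stack and confirming delivery on it (SIGUSR1 is an illustrative choice):

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_sig(int sig)
{
	stack_t cur;

	(void)sig;
	sigaltstack(NULL, &cur);	/* query while the handler runs */
	if (cur.ss_flags & SS_ONSTACK)
		write(1, "handler ran on the alternate stack\n", 35);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sig;
	sa.sa_flags = SA_ONSTACK;	/* deliver this signal on ss */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}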
Example #20
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	struct file *file;
	struct socket *sock;
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	if (server->sign_active)
	{
		sign_packet(server, &size);
	}
#endif /* CONFIG_NCPFS_PACKET_SIGNING */
	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;
	/* N.B. this isn't needed ... check socket type? */
	if (!sock) {
		printk(KERN_ERR "ncp_rpc_call: socki_lookup failed\n");
		result = -EBADF;
	} else {
		mm_segment_t fs;
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sigmask_lock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
		
		fs = get_fs();
		set_fs(get_ds());

		if (sock->type == SOCK_STREAM)
			result = do_ncp_tcp_rpc_call(server, size, reply, max_reply_size);
		else
			result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		set_fs(fs);

		spin_lock_irqsave(&current->sigmask_lock, flags);
		current->blocked = old_set;
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	if (result < 0) {
		/* There was a problem with I/O, so the connection is
		 * no longer usable. */
		ncp_invalidate_conn(server);
	}
	return result;
}
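The save/modify/restore dance around the RPC call above has a direct userspace analogue with sigprocmask(2): narrow the mask to the signals allowed to interrupt a blocking operation, then put the old mask back. A sketch under that assumption; do_blocking_io() is a hypothetical stand-in for the NCP call:

#include <signal.h>

/* do_blocking_io() is a hypothetical stand-in for the RPC call. */
static int call_with_narrow_mask(int (*do_blocking_io)(void))
{
	sigset_t narrow, old;
	int ret;

	sigfillset(&narrow);
	sigdelset(&narrow, SIGINT);	/* the one interrupter we allow */
	sigprocmask(SIG_SETMASK, &narrow, &old);	/* like siginitsetinv() */

	ret = do_blocking_io();

	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore, as with old_set */
	return ret;
}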
Example #21
/**
 * ccs_load_policy - Run external policy loader to load policy.
 *
 * @filename: The program about to start.
 *
 * This function checks whether @filename is /sbin/init, and if so,
 * invokes /sbin/ccs-init, waits for it to terminate, and then continues
 * with the invocation of /sbin/init.
 * /sbin/ccs-init reads the policy files in the /etc/ccs/ directory and
 * writes them to the /proc/ccs/ interfaces.
 *
 * Returns nothing.
 */
static void ccs_load_policy(const char *filename)
{
	if (ccsecurity_ops.disabled)
		return;
	if (strcmp(filename, "/sbin/init") &&
	    strcmp(filename, CONFIG_CCSECURITY_ALTERNATIVE_TRIGGER))
		return;
	if (!ccs_policy_loader_exists())
		return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
	{
		char *argv[2];
		char *envp[3];
		printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
		       ccs_loader);
		argv[0] = (char *) ccs_loader;
		argv[1] = NULL;
		envp[0] = "HOME=/";
		envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[2] = NULL;
		call_usermodehelper(argv[0], argv, envp, 1);
	}
#elif defined(TASK_DEAD)
	{
		/* Copied from kernel/kmod.c */
		struct task_struct *task = current;
		pid_t pid = kernel_thread(ccs_run_loader, NULL, 0);
		sigset_t tmpsig;
		spin_lock_irq(&task->sighand->siglock);
		tmpsig = task->blocked;
		siginitsetinv(&task->blocked,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		recalc_sigpending();
		spin_unlock_irq(&task->sighand->siglock);
		if (pid >= 0)
			waitpid(pid, NULL, __WCLONE);
		spin_lock_irq(&task->sighand->siglock);
		task->blocked = tmpsig;
		recalc_sigpending();
		spin_unlock_irq(&task->sighand->siglock);
	}
#else
	{
		/* Copied from kernel/kmod.c */
		struct task_struct *task = current;
		pid_t pid = kernel_thread(ccs_run_loader, NULL, 0);
		sigset_t tmpsig;
		spin_lock_irq(&task->sigmask_lock);
		tmpsig = task->blocked;
		siginitsetinv(&task->blocked,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		recalc_sigpending(task);
		spin_unlock_irq(&task->sigmask_lock);
		if (pid >= 0)
			waitpid(pid, NULL, __WCLONE);
		spin_lock_irq(&task->sigmask_lock);
		task->blocked = tmpsig;
		recalc_sigpending(task);
		spin_unlock_irq(&task->sigmask_lock);
	}
#endif
	if (ccsecurity_ops.check_profile)
		ccsecurity_ops.check_profile();
	else
		panic("Failed to load policy.");
}
Example #22
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	struct k_sigaction *ka;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	if (!oldset)
		oldset = &current->blocked;

	for (;;) {
		unsigned long signr;

		spin_lock_irq(&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq(&current->sigmask_lock);

		if (!signr)
			break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			/* Let the debugger run.  */
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();

			/* We're back.  Did the debugger cancel the sig?  */
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;

			/* The debugger continued.  Ignore SIGSTOP.  */
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good?  */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}

		ka = &current->sig->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) {
			if (signr != SIGCHLD)
				continue;
			/* Check for SIGCHLD: it's special.  */
			while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				/* nothing */;
			continue;
		}

		if (ka->sa.sa_handler == SIG_DFL) {
			int exit_code = signr;

			/* Init gets no signals it doesn't want.  */
			if (current->pid == 1)
				continue;

			switch (signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
				{
					continue;
				}
				/* FALLTHRU */

			case SIGSTOP:
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
				/* FALLTHRU */

			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOTREACHED */
			}
		}

		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, ka, &info, oldset, regs);
		return 1;
	}

	/* Did we come from a system call? */
	if (PT_REGS_SYSCALL (regs)) {
		/* Restart the system call - no handlers present */
		if (regs->gpr[GPR_RVAL] == -ERESTARTNOHAND ||
		    regs->gpr[GPR_RVAL] == -ERESTARTSYS ||
		    regs->gpr[GPR_RVAL] == -ERESTARTNOINTR) {
			regs->gpr[12] = PT_REGS_SYSCALL (regs);
			regs->pc -= 8; /* 4 + 8 for microblaze return offset weirdness */
		}
	}
	return 0;
}
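The SIG_IGN special case for SIGCHLD above (reaping children in a sys_wait4() loop) is what makes the classic userspace idiom work: ignore SIGCHLD and the kernel reaps children automatically, so wait() reports ECHILD rather than leaving zombies. A small demonstration:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to reap for us */

	if (fork() == 0)
		_exit(0);		/* child exits immediately */

	sleep(1);			/* give the child time to exit */
	if (wait(NULL) < 0)
		perror("wait");		/* expected: "No child processes" */
	return 0;
}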
Example #23
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned int psr, pc, npc;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 0x03))
		goto segv;

	err = __get_user(pc, &sf->regs.pc);
	err |= __get_user(npc, &sf->regs.npc);
	err |= ((pc | npc) & 0x03);

	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	err |= __copy_from_user(&regs->u_regs[UREG_G1],
				&sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

	regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	
	if (err)
		goto segv;
		
	regs->pc = pc;
	regs->npc = npc;
	
	/* It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(rwin_save))
			goto segv;
	}

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
Example #24
static void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
{
	/* Are we from a system call? */
	if (regs->tra >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
			case -ERESTART_RESTARTBLOCK:
				regs->regs[0] = -EINTR;
				break;

			case -ERESTARTNOHAND:
				regs->regs[0] = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->regs[0] = -EINTR;
					break;
				}
			/* fallthrough */
			case -ERESTARTNOINTR:
				regs->pc -= 2;
		}
	} else {
		/* gUSA handling */
#ifdef CONFIG_PREEMPT
		unsigned long flags;

		local_irq_save(flags);
#endif
		if (regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point #1 */
				regs->pc = regs->regs[0] + offset - 2;
		}
#ifdef CONFIG_PREEMPT
		local_irq_restore(flags);
#endif
	}

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs);
	else
		setup_frame(sig, ka, oldset, regs);

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
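SA_ONESHOT, which resets the handler to SIG_DFL above after one delivery, survives in POSIX as SA_RESETHAND. A short sketch showing the disposition reverting after the first signal:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void once(int sig)
{
	(void)sig;
	write(1, "handled once\n", 13);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = once;
	sa.sa_flags = SA_RESETHAND;	/* POSIX name for SA_ONESHOT */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* first delivery runs once() */
	sigaction(SIGUSR1, NULL, &sa);	/* re-read the disposition */
	printf("disposition is now %s\n",
	       sa.sa_handler == SIG_DFL ? "SIG_DFL" : "still the handler");
	return 0;
}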
Example #25
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
 	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

 	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
 	p->it_sched_expires = 0;
 	INIT_LIST_HEAD(&p->cpu_timers[0]);
 	INIT_LIST_HEAD(&p->cpu_timers[1]);
 	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cpuset;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

	rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied the first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to the
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
 	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespace;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
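The EINVAL checks at the top of copy_process() are directly observable from userspace: CLONE_THREAD without CLONE_SIGHAND (and CLONE_SIGHAND without CLONE_VM) are rejected before any task is created. A sketch using the raw clone syscall; the argument order shown is the x86-64 convention, and the call is safe here precisely because it fails:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* CLONE_THREAD without CLONE_SIGHAND is rejected in copy_process()
	 * before a task exists, so the missing child stack never matters. */
	long r = syscall(SYS_clone, (unsigned long)CLONE_THREAD,
			 0UL, NULL, NULL, 0UL);

	if (r < 0 && errno == EINVAL)
		printf("CLONE_THREAD alone -> EINVAL\n");
	return 0;
}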
Example #26
static int ncp_do_request(struct ncp_server *server, int size,
                          void* reply, int max_reply_size)
{
    int result;

    if (server->lock == 0) {
        printk(KERN_ERR "ncpfs: Server not locked!\n");
        return -EIO;
    }
    if (!ncp_conn_valid(server)) {
        printk(KERN_ERR "ncpfs: Connection invalid!\n");
        return -EIO;
    }
    {
        mm_segment_t fs;
        sigset_t old_set;
        unsigned long mask, flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        old_set = current->blocked;
        if (current->flags & PF_EXITING)
            mask = 0;
        else
            mask = sigmask(SIGKILL);
        if (server->m.flags & NCP_MOUNT_INTR) {
            /* FIXME: This doesn't seem right at all.  So, like,
               we can't handle SIGINT and get whatever to stop?
               What if we've blocked it ourselves?  What about
               alarms?  Why, in fact, are we mucking with the
               sigmask at all? -- r~ */
            if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
                mask |= sigmask(SIGINT);
            if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
                mask |= sigmask(SIGQUIT);
        }
        siginitsetinv(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        fs = get_fs();
        set_fs(get_ds());

        result = do_ncp_rpc_call(server, size, reply, max_reply_size);

        set_fs(fs);

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = old_set;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
    }

    DDPRINTK("do_ncp_rpc_call returned %d\n", result);

    if (result < 0) {
        /* There was a problem with I/O, so the connection is
         * no longer usable. */
        ncp_invalidate_conn(server);
    }
    return result;
}
Example #27
/*
 * OK, we're invoking a handler
 */	
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			setup_syscall_restart(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

	return 0;
}
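The sigaddset() guarded by SA_NODEFER above decides whether the delivered signal is blocked while its own handler runs. A userspace probe of that effect (SIGUSR1 is illustrative):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void probe(int sig)
{
	sigset_t cur;
	const char *msg;

	(void)sig;
	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read the in-handler mask */
	msg = sigismember(&cur, SIGUSR1)
		? "SIGUSR1 blocked inside its handler\n"
		: "SIGUSR1 not blocked (SA_NODEFER)\n";
	write(1, msg, strlen(msg));
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = probe;
	sa.sa_flags = SA_NODEFER;	/* skip the sigaddset() step above */
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}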
Example #28
static int dpram_thread(void *data)
{
	int ret = 0;
	int i;
	struct file *filp;
	struct sched_param schedpar;

	dpram_task = current;

	daemonize("dpram_thread");
	strcpy(current->comm, "multipdp");

	schedpar.sched_priority = 1;
	sched_setscheduler(current, SCHED_FIFO, &schedpar);

	/* set signals to accept */
	siginitsetinv(&current->blocked, sigmask(SIGUSR1));
	recalc_sigpending();

	for (i = 0; i < 10; i++) {
		filp = dpram_open();
		if (filp == NULL) {
			EPRINTK("dpram_open failed! retry\n");
			msleep(1000);
		} else
			break;
	}
	if (filp == NULL) {
		EPRINTK("dpram_open failed!\n");
		goto out;
	}

	dpram_filp = filp;

	/* send start signal */
	complete(&dpram_complete);

	while (1) {
		ret = dpram_poll(filp);

		if (ret == -ERESTARTSYS) {
			if (sigismember(&current->pending.signal, SIGUSR1)) {
				sigdelset(&current->pending.signal, SIGUSR1);
				recalc_sigpending();
				ret = 0;
				break;
			}
		} else if (ret < 0) {
			EPRINTK("dpram_poll() failed\n");
			break;
		} else {
			char ch;
			ret = dpram_read(dpram_filp, &ch, sizeof(ch));

			if(ret < 0) {
				return ret;
			}

			if (ch == 0x7f) {
				pdp_demux();
			}
		}
	}

	dpram_close(filp);
	dpram_filp = NULL;

out:
	dpram_task = NULL;

	/* send finish signal and exit */
	complete_and_exit(&dpram_complete, ret);
}
Example #29
/*
 * OK, we're invoking a handler
 */
static int handle_signal(struct pt_regs *regs, unsigned long signr,
			 struct k_sigaction *ka, struct siginfo *info,
			 sigset_t *oldset)
{
	unsigned long sp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* Did we come from a system call? */
	if (PT_REGS_SYSCALL_NR(regs) >= 0) {
		/* If so, check system call restarting.. */
		switch (PT_REGS_SYSCALL_RET(regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			PT_REGS_SYSCALL_RET(regs) = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				PT_REGS_SYSCALL_RET(regs) = -EINTR;
				break;
			}
		/* fallthrough */
		case -ERESTARTNOINTR:
			PT_REGS_RESTART_SYSCALL(regs);
			PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
			break;
		}
	}

	sp = PT_REGS_SP(regs);
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

#ifdef CONFIG_ARCH_HAS_SC_SIGNALS
	if (!(ka->sa.sa_flags & SA_SIGINFO))
		err = setup_signal_stack_sc(sp, signr, ka, regs, oldset);
	else
#endif
		err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset);

	if (err) {
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = *oldset;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		force_sigsegv(signr, current);
	} else {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked, &current->blocked,
			  &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked, signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return err;
}
Example #30
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
	extern char ia64_strace_leave_kernel, ia64_leave_kernel;
	struct sigcontext __user *sc;
	struct siginfo si;
	sigset_t set;
	long retval;

	sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;

	/*
	 * When we return to the previously executing context, r8 and r10 have already
	 * been setup the way we want them.  Indeed, if the signal wasn't delivered while
	 * in a system call, we must not touch r8 or r10 as otherwise user-level state
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    || test_thread_flag(TIF_SYSCALL_AUDIT))
		/*
		 * strace expects to be notified after sigreturn returns even though the
		 * context to which we return may not be in the middle of a syscall.
		 * Thus, the return-value that strace displays for sigreturn is
		 * meaningless.
		 */
		retval = (long) &ia64_strace_leave_kernel;

	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;

	if (GET_SIGSET(&set, &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, scr))
		goto give_sigsegv;

#if DEBUG_SIG
	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
	/*
	 * It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);
	return retval;

  give_sigsegv:
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = task_pid_vnr(current);
	si.si_uid = current_uid();
	si.si_addr = sc;
	force_sig_info(SIGSEGV, &si, current);
	return retval;
}