Example #1
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc,  &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	/* This is pretty much atomic; no amount of locking would
	 * prevent the races that exist anyway.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
			        (_NSIG_WORDS-1) * sizeof(unsigned int));
	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
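A minimal user-space sketch for context (illustrative only, not part of the kernel source above): any POSIX handler that returns exercises this path, because the signal trampoline then issues the sigreturn system call that lands in do_sigreturn().

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;
	/* Returning from here enters the signal trampoline, which
	 * invokes sigreturn to restore the interrupted context. */
	write(STDOUT_FILENO, "in handler\n", 11);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	puts("context restored by sigreturn");
	return 0;
}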
Example #2
/*
 * OK, we're invoking a handler
 */	
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			restart_syscall(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

}
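For context, a user-space sketch (illustrative, standard POSIX) of the restart decision above: without SA_RESTART, a blocking read() interrupted by a signal fails with EINTR, which is exactly the -ERESTARTSYS to -EINTR conversion performed by handle_signal().

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* only exists to interrupt the read() */
}

int main(void)
{
	struct sigaction sa;
	int fds[2];
	char c;

	if (pipe(fds) < 0)
		return 1;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* note: no SA_RESTART */
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);
	if (read(fds[0], &c, 1) < 0 && errno == EINTR)
		puts("read() interrupted: handler lacked SA_RESTART");
	return 0;
}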
Example #3
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= restore_altstack(&sf->stack);
	if (err)
		goto segv;

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(rwin_save))
			goto segv;
	}

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	set_current_blocked(&set);
	return;
segv:
	force_sig(SIGSEGV, current);
}
Example #4
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

#ifdef __tilegx__
	/*
	 * We don't need early do_page_fault_ics() support, since unlike
	 * Pro we don't need to worry about unlocking the atomic locks.
	 * There is only one current case in GX where we touch any memory
	 * under ICS other than our own kernel stack, and we handle that
	 * here.  (If we crash due to trying to touch our own stack,
	 * we're in too much trouble for C code to help out anyway.)
	 */
	if (write & ~1) {
		unsigned long pc = write & ~1;
		if (pc >= (unsigned long) __start_unalign_asm_code &&
		    pc < (unsigned long) __end_unalign_asm_code) {
			struct thread_info *ti = current_thread_info();
			/*
			 * Our EX_CONTEXT is still what it was from the
			 * initial unalign exception, but now we've faulted
			 * on the JIT page.  We would like to complete the
			 * page fault however is appropriate, and then retry
			 * the instruction that caused the unalign exception.
			 * Our state has been "corrupted" by setting the low
			 * bit in "sp", and stashing r0..r3 in the
			 * thread_info area, so we revert all of that, then
			 * continue as if this were a normal page fault.
			 */
			regs->sp &= ~1UL;
			regs->regs[0] = ti->unalign_jit_tmp[0];
			regs->regs[1] = ti->unalign_jit_tmp[1];
			regs->regs[2] = ti->unalign_jit_tmp[2];
			regs->regs[3] = ti->unalign_jit_tmp[3];
			write &= 1;
		} else {
			pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
				 current->comm, current->pid, pc, address);
			show_regs(regs);
			do_group_exit(SIGKILL);
			return;
		}
	}
#else
	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA()
	if (!user_mode(regs)) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
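A small user-space sketch (illustrative only) of the common case this handler serves: the first store to an anonymous mapping takes a fault, and the fault path maps in a zeroed page and restarts the faulting instruction.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Anonymous mapping: no physical page is backing it yet. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	/* The first write faults; the kernel allocates a zeroed page,
	 * installs the mapping, and re-executes the store. */
	p[0] = 42;
	printf("faulted-in byte: %d\n", p[0]);
	munmap(p, 4096);
	return 0;
}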
Example #5
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Give up early, as i386 does, just in case */
	if (err)
		goto give_sigsegv;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			        regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Give up early, as i386 does, just in case */
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		/*
		 * On SH5 all edited pointers are subject to NEFF
		 */
		DEREF_REG_PR = neff_sign_extend((unsigned long)
			ka->sa.sa_restorer | 0x1);
	} else {
		/*
		 * Different approach on SH5.
	         * . Endianness independent asm code gets placed in entry.S .
		 *   This is limited to four ASM instructions corresponding
		 *   to two long longs in size.
		 * . err checking is done on the else branch only
		 * . flush_icache_range() is called upon __put_user() only
		 * . all edited pointers are subject to NEFF
		 * . being code, linker turns ShMedia bit on, always
		 *   dereference index -1.
		 */
		DEREF_REG_PR = neff_sign_extend((unsigned long)
			frame->retcode | 0x01);

		if (__copy_to_user(frame->retcode,
			(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
			goto give_sigsegv;

		/* Cohere the trampoline with the I-cache. */
		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
	}

	/*
	 * Set up registers for signal handler.
	 * All edited pointers are subject to NEFF.
	 */
	regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
	regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);

	set_fs(USER_DS);

	pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
		 signal, current->comm, current->pid, frame,
		 regs->pc >> 32, regs->pc & 0xffffffff,
		 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
Example #6
static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}
Example #7
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
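A minimal C11 sketch of the cmpxchg fast path above, assuming the same count convention (1 = unlocked, 0 = locked, negative = locked with waiters); this mirrors atomic_cmpxchg(&lock->count, 1, 0) == 1 and is not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>

static bool mutex_try_fastpath(atomic_int *count)
{
	int unlocked = 1;

	/* Succeed only if we are the one flipping 1 -> 0; on failure
	 * 'unlocked' is overwritten with the observed value, which we
	 * simply discard. */
	return atomic_compare_exchange_strong(count, &unlocked, 0);
}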
Example #8
int __range_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;
	return !((addr < limit && size <= limit - addr) ||
		 is_arch_mappable_range(addr, size));
}
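A standalone sketch of the overflow-safe idiom in __range_ok() above: testing size <= limit - addr avoids computing addr + size, which could wrap around. Note the kernel helper returns 0 on success; this illustrative variant returns 1 when the range fits.

#include <stdio.h>

static int range_fits(unsigned long addr, unsigned long size,
		      unsigned long limit)
{
	/* A naive "addr + size <= limit" could wrap for huge sizes
	 * and wrongly pass; subtracting instead cannot overflow once
	 * addr < limit is known. */
	return addr < limit && size <= limit - addr;
}

int main(void)
{
	printf("%d\n", range_fits(0x1000, 0x100, 0x10000));	/* 1 */
	printf("%d\n", range_fits(0x1000, ~0UL, 0x10000));	/* 0 */
	return 0;
}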
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[15]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			        regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
	} else if (likely(current->mm->context.vdso)) {
		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
	} else {
		/* Generate return code (system call to rt_sigreturn) */
		err |= __put_user(MOVW(7), &frame->retcode[0]);
		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
		regs->pr = (unsigned long) frame->retcode;
		flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
	}

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->regs[15] = (unsigned long) frame;
	regs->regs[4] = signal; /* Arg for signal handler */
	regs->regs[5] = (unsigned long) &frame->info;
	regs->regs[6] = (unsigned long) &frame->uc;

	if (current->personality & FDPIC_FUNCPTRS) {
		struct fdpic_func_descriptor __user *funcptr =
			(struct fdpic_func_descriptor __user *)ka->sa.sa_handler;

		err |= __get_user(regs->pc, &funcptr->text);
		err |= __get_user(regs->regs[12], &funcptr->GOT);
	} else
		regs->pc = (unsigned long)ka->sa.sa_handler;

	if (err)
		goto give_sigsegv;

	set_fs(USER_DS);

	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&isolated_cpus_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 10; /* second */
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);

	while (!kthread_should_stop()) {
		int cpu;
		u64 expire_time;

		try_to_freeze();

		/* round robin to cpus */
		if (last_jiffies + round_robin_time * HZ < jiffies) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			local_irq_disable();
			cpu = smp_processor_id();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				&cpu);
			stop_critical_timings();

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);

			start_critical_timings();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				&cpu);
			local_irq_enable();

			if (jiffies > expire_time) {
				do_sleep = 1;
				break;
			}
		}

		current_thread_info()->status |= TS_POLLING;

		/*
		 * The current sched_rt implementation caps RT task running
		 * time: when an RT task has used 95% of the CPU time, it is
		 * scheduled out for 5% so that other tasks are not starved.
		 * But the mechanism only works when every CPU has an RT task
		 * running; if one CPU has none, RT tasks from other CPUs will
		 * borrow its CPU time and can exceed the 95% cap. To make
		 * starvation avoidance work, take a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc = -ENOMEM;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"power_saving/%d", ps_tsk_num);
	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
	if (!rc)
		ps_tsk_num++;
	else
		ps_tsks[ps_tsk_num] = NULL;

	return rc;
}
Example #11
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;
	unsigned long address = 0;
#ifdef CONFIG_MMU
	pmd_t *pmdp;
	pte_t *ptep;
#endif

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	if (info)
		err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			&frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->r1),
			&frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. If provided, use a stub
	 already in userspace. */
	/* minus 8 is offset to cater for "rtsd r15,8" */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
	} else {
		/* addi r12, r0, __NR_sigreturn */
		err |= __put_user(0x31800000 | __NR_rt_sigreturn ,
				frame->tramp + 0);
		/* brki r14, 0x8 */
		err |= __put_user(0xb9cc0008, frame->tramp + 1);

		/* Return from sighandler will jump to the tramp.
		 Negative 8 offset because return is rtsd r15, 8 */
		regs->r15 = ((unsigned long)frame->tramp)-8;

		address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
		pmdp = pmd_offset(pud_offset(
				pgd_offset(current->mm, address),
						address), address);

		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			address = (unsigned long) page_address(pte_page(*ptep));
			/* MS: need to add the offset within the page */
			address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
			/* MS: address is virtual */
			address = virt_to_phys(address);
			invalidate_icache_range(address, address + 8);
			flush_dcache_range(address, address + 8);
		}
		pte_unmap(ptep);
		preempt_enable();
#else
		invalidate_icache_range(address, address + 8);
		flush_dcache_range(address, address + 8);
#endif
	}

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE;

	/* Signal handler args: */
	regs->r5 = signal; /* arg 0: signum */
	regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
	regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
	/* Offset to handle microblaze rtid r14, 0 */
	regs->pc = (unsigned long)ka->sa.sa_handler;

	set_fs(USER_DS);

	/* the tracer may want to single-step inside the handler */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

#ifdef DEBUG_SIG
	printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
		current->comm, current->pid, frame, regs->pc);
#endif

	return;

give_sigsegv:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	force_sig(SIGSEGV, current);
}
Example #12
void do_rt_sigreturn(struct pt_regs *regs)
{
    struct rt_signal_frame __user *sf;
    unsigned long tpc, tnpc, tstate;
    __siginfo_fpu_t __user *fpu_save;
    mm_segment_t old_fs;
    sigset_t set;
    stack_t st;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    synchronize_user_stack ();
    sf = (struct rt_signal_frame __user *)
         (regs->u_regs [UREG_FP] + STACK_BIAS);

    /* 1. Make sure we are not getting garbage from the user */
    if (((unsigned long) sf) & 3)
        goto segv;

    err = get_user(tpc, &sf->regs.tpc);
    err |= __get_user(tnpc, &sf->regs.tnpc);
    if (test_thread_flag(TIF_32BIT)) {
        tpc &= 0xffffffff;
        tnpc &= 0xffffffff;
    }
    err |= ((tpc | tnpc) & 3);

    /* 2. Restore the state */
    err |= __get_user(regs->y, &sf->regs.y);
    err |= __get_user(tstate, &sf->regs.tstate);
    err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

    /* User can only change condition codes and %asi in %tstate. */
    regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
    regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

    err |= __get_user(fpu_save, &sf->fpu_save);
    if (fpu_save)
        err |= restore_fpu_state(regs, &sf->fpu_state);

    err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
    err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

    if (err)
        goto segv;

    regs->tpc = tpc;
    regs->tnpc = tnpc;

    /* It is more difficult to avoid calling this function than to
       call it and ignore errors.  */
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
    set_fs(old_fs);

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return;
segv:
    force_sig(SIGSEGV, current);
}
Example #13
static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
			   int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size;
	unsigned int psr;
	int err;

	synchronize_user_stack();
	sigframe_size = RT_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);
	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;
	if (current_thread_info()->w_saved != 0)
		goto sigill;

	err  = __put_user(regs->pc, &sf->regs.pc);
	err |= __put_user(regs->npc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = regs->psr;
	if (used_math())
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
	err |= __put_user(0, &sf->extra_size);

	if (psr & PSR_EF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
	
	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
	
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window32));

	err |= copy_siginfo_to_user(&sf->info, info);

	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
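The ss_sp/ss_flags/ss_size values stored into the frame above originate from a user-space sigaltstack() call. A minimal sketch, assuming a POSIX system:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void handler(int sig)
{
	(void)sig;	/* runs on the alternate stack */
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;	/* deliver on the stack above */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	puts("handler was delivered on the alternate stack");
	return 0;
}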
Example #14
static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
			int signo, sigset_t *oldset)
{
	struct signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();

	sigframe_size = SF_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill_and_return;

	if (current_thread_info()->w_saved != 0)
		goto sigill_and_return;

	/* 2. Save the current process state */
	err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));
	
	err |= __put_user(0, &sf->extra_size);

	if (used_math()) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	err |= __put_user(oldset->sig[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window32));
	if (err)
		goto sigsegv;
	
	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
Example #15
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}
Example #16
static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
	unsigned long ip, flags, nat, um, cfm, rsc;
	long err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* restore scratch state that always gets updated during signal delivery: */
	err  = __get_user(flags, &sc->sc_flags);
	err |= __get_user(nat, &sc->sc_nat);
	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
	err |= __get_user(cfm, &sc->sc_cfm);
	err |= __get_user(um, &sc->sc_um);			/* user mask */
	err |= __get_user(rsc, &sc->sc_ar_rsc);
	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __get_user(scr->pt.pr, &sc->sc_pr);		/* predicates */
	err |= __get_user(scr->pt.b0, &sc->sc_br[0]);		/* b0 (rp) */
	err |= __get_user(scr->pt.b6, &sc->sc_br[6]);		/* b6 */
	err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8);	/* r1 */
	err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8);	/* r8-r11 */
	err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8);	/* r12-r13 */
	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */

	scr->pt.cr_ifs = cfm | (1UL << 63);
	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */

	/* establish new instruction pointer: */
	scr->pt.cr_iip = ip & ~0x3UL;
	ia64_psr(&scr->pt)->ri = ip & 0x3;
	scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);

	scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Restore most scratch-state only when not in syscall. */
		err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __get_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __get_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
		err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8);	/* r2-r3 */
		err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8);	/* r16-r31 */
	}

	if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
		struct ia64_psr *psr = ia64_psr(&scr->pt);

		err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
		psr->mfh = 0;	/* drop signal handler's fph contents... */
		preempt_disable();
		if (psr->dfh)
			ia64_drop_fpu(current);
		else {
			/* We already own the local fph, otherwise psr->dfh wouldn't be 0.  */
			__ia64_load_fpu(current->thread.fph);
			ia64_set_local_fpu_owner(current);
		}
		preempt_enable();
	}
	return err;
}
Example #17
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}
Example #18
static void
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct exregs_regs *regs)
{
	struct rt_sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	int err = 0;
	printk_dbg("%s called\n", __func__);

	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto badframe;

	__put_user_error(&frame->info, &frame->pinfo, err);
	__put_user_error(&frame->uc, &frame->puc, err);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Clear all the bits of the ucontext we don't use.  */
	err |= clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));

	err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
				regs, set->sig[0]);
	err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	err |= __put_user(handler, &frame->sig_ip);
	printk_dbg("%s frame->sig_ip = %lx\n", __func__, handler);

	if (!err)
		err = setup_return(regs, ka, &frame->retcode, frame,
				&frame->lr, &frame->usig, usig);

	if (!err) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <*****@*****.**> 2000-12-06
		 */
//		ARM_put_r1(regs, (unsigned long)frame->pinfo);
//		ARM_put_r2(regs, (unsigned long)frame->puc);
	}

	if (err)
		goto badframe;

	regs->ip = TASK_SIG_BASE;
	switch (regs->syscall_action)
	{
	case 1:	/* Syscall */
		regs->ip += ((unsigned long)&__wombat_user_rt_sigentry) & ~PAGE_MASK;
		break;
	case 0:	/* Fault */
	case 2:	/* Restart syscall */
		regs->ip += ((unsigned long)&__wombat_user_rt_sigentry_restart) & ~PAGE_MASK;
		break;
	case 4:	/* Interrupt syscall */
		regs->ip += ((unsigned long)&__wombat_user_rt_sigentry_int) & ~PAGE_MASK;
		break;
	default:
		BUG();
	}

	set_need_restart(current_thread_info(), regs->ip,
			regs->sp, regs->flags);

	printk_dbg("SIG rt deliver (%s:%d:%lx): sp=%p pc=%p\n",
			current->comm, current->pid,
			current_thread_info()->user_tid.raw,
			frame, (void*)regs->ip);

	return;

badframe:
	force_sigsegv(usig, current);
	return;
}
Example #19
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp,
				unsigned long map_address)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = map_address;
	int ret = 0;
	bool compat;
	unsigned long flags;

	if (vdso_enabled == VDSO_DISABLED && map_address == 0) {
		current->mm->context.vdso = NULL;
		return 0;
	}

	flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE |
		mm->def_flags;

	ret = -ENOMEM;
	if (ub_memory_charge(mm, PAGE_SIZE, flags, NULL, UB_SOFT))
		goto err_charge;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (!compat || map_address) {
		addr = get_unmapped_area_prot(NULL, addr, PAGE_SIZE, 0, 0, 1);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	} else
		addr = VDSO_HIGH_BASE;

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat || map_address) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 *
		 * Make sure the vDSO gets into every core dump.
		 * Dumping its contents makes post-mortem fully
		 * interpretable later without matching up the same
		 * kernel and hardware config to see what PC values
		 * meant.
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					      VM_ALWAYSDUMP,
					      vdso32_pages);

		if (ret)
			goto up_fail;
	}

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	if (ret < 0)
		ub_memory_uncharge(mm, PAGE_SIZE, flags, NULL);
err_charge:

	return ret;
}
Example #20
/*
 * This is called when the thread calls exit().
 */
void exit_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(current_thread_info());
#endif
}
Example #21
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
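A portable sketch of the dispatch loop above: peel off the lowest set bit (what the kernel's __ffs computes) with a count-trailing-zeros builtin until no pending bits remain.

#include <stdio.h>

static void dispatch_pending(unsigned long pending)
{
	while (pending) {
		unsigned long irq = __builtin_ctzl(pending);

		pending &= ~(1UL << irq);	/* mark it handled */
		printf("handle irq %lu\n", irq);
	}
}

int main(void)
{
	dispatch_pending(0x29);	/* bits 0, 3, 5 */
	return 0;
}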
Example #22
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if (get_thread_wsaved()					||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp  = &ucp->uc_mcontext.mc_gregs;
	err  = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));

	/* Skip %g7 as that's the thread register in userspace.  */

	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #23
static int setup_frame(int sig, struct k_sigaction *ka,
		       sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);

	/* Give up early, as i386 does, just in case */
	if (err)
		goto give_sigsegv;

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	/* Give up early, as i386 does, just in case */
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		/*
		 * On SH5 all edited pointers are subject to NEFF
		 */
		DEREF_REG_PR = neff_sign_extend((unsigned long)
			ka->sa.sa_restorer | 0x1);
	} else {
		/*
		 * Different approach on SH5.
	         * . Endianness independent asm code gets placed in entry.S .
		 *   This is limited to four ASM instructions corresponding
		 *   to two long longs in size.
		 * . err checking is done on the else branch only
		 * . flush_icache_range() is called upon __put_user() only
		 * . all edited pointers are subject to NEFF
		 * . being code, linker turns ShMedia bit on, always
		 *   dereference index -1.
		 */
		DEREF_REG_PR = neff_sign_extend((unsigned long)
			frame->retcode | 0x01);

		if (__copy_to_user(frame->retcode,
			(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
			goto give_sigsegv;

		/* Cohere the trampoline with the I-cache. */
		flush_cache_sigtramp(DEREF_REG_PR-1);
	}

	/*
	 * Set up registers for signal handler.
	 * All edited pointers are subject to NEFF.
	 */
	regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */

        /* FIXME:
           The glibc profiling support for SH-5 needs to be passed a sigcontext
           so it can retrieve the PC.  At some point during 2003 the glibc
           support was changed to receive the sigcontext through the 2nd
           argument, but there are still versions of libc.so in use that use
           the 3rd argument.  Until libc.so is stabilised, pass the sigcontext
           through both 2nd and 3rd arguments.
        */

	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;

	regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);

	set_fs(USER_DS);

	/* Broken %016Lx */
	pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
		 signal, current->comm, current->pid, frame,
		 regs->pc >> 32, regs->pc & 0xffffffff,
		 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
Example #24
static inline int
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();
	
	sigframe_size = sizeof(struct rt_signal_frame);
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame __user *)
		get_sigframe(ka, regs, sigframe_size);
	
	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	
	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	err |= copy_in_user((u64 __user *)sf,
			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;
	
	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 5. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return 0;

sigill:
	do_exit(SIGILL);
	return -EINVAL;

sigsegv:
	force_sigsegv(signo, current);
	return -EFAULT;
}
Example #25
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init, even with a SIGKILL, not
 * even by mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	sigset_t *oldset;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, 0);
	if (signr > 0) {
		handle_syscall_restart(regs, &ka.sa);

		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * If a signal was successfully delivered, the
			 * saved sigmask is in its frame, and we can
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;

			tracehook_signal_handler(signr, &info, &ka, regs,
					test_thread_flag(TIF_SINGLESTEP));
			return;
		}
	}

	/* Did we come from a system call? */
	if (regs->syscall_nr >= 0) {
		/* Restart the system call - no handlers present */
		switch (regs->regs[REG_RET]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Decode Syscall # */
			regs->regs[REG_RET] = regs->syscall_nr;
			regs->pc -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->regs[REG_RET] = __NR_restart_syscall;
			regs->pc -= 4;
			break;
		}
	}

	/* No signal to deliver -- put the saved sigmask back */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}

	return;
}
Example #26
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init, even with a SIGKILL, not
 * even by mistake.
 */
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
	struct k_sigaction ka;
	int restart_syscall;
	sigset_t *oldset;
	siginfo_t info;
	int signr;
	
	if (pt_regs_is_syscall(regs) &&
	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
		restart_syscall = 1;
	} else
		restart_syscall = 0;

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern void do_signal32(sigset_t *, struct pt_regs *,
					int restart_syscall,
					unsigned long orig_i0);
		do_signal32(oldset, regs, restart_syscall, orig_i0);
		return;
	}
#endif	

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* If the debugger messes with the program counter, it clears
	 * the software "in syscall" bit, directing us to not perform
	 * a syscall restart.
	 */
	if (restart_syscall && !pt_regs_is_syscall(regs))
		restart_syscall = 0;

	if (signr > 0) {
		if (restart_syscall)
			syscall_restart(orig_i0, regs, &ka.sa);
		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
			/* A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}
		return;
	}
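	/* Background assumption: sparc syscalls flag failure via the
	 * carry bit in %tstate and return a positive error code in %o0,
	 * which is why the ERESTART* values below are compared
	 * unnegated. */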
	if (restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		pt_regs_clear_syscall(regs);
	}
	if (restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		pt_regs_clear_syscall(regs);
	}

	/* If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
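The saved_sigmask / TS_RESTORE_SIGMASK dance above is what lets pselect(2), ppoll(2) and friends swap the signal mask atomically for the duration of one call. A hedged userspace sketch of the behaviour it enables (illustrative only):

#include <signal.h>
#include <stdio.h>
#include <sys/select.h>

static void on_usr1(int sig) { (void)sig; }

int main(void)
{
	sigset_t blocked, during_call;

	signal(SIGUSR1, on_usr1);

	/* SIGUSR1 stays blocked during normal execution... */
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGUSR1);
	sigprocmask(SIG_BLOCK, &blocked, NULL);

	/* ...and is deliverable only while pselect() sleeps: the kernel
	 * parks the old mask in saved_sigmask, sets TS_RESTORE_SIGMASK,
	 * and do_signal() decides whether the signal frame or the slow
	 * path above restores it. */
	sigemptyset(&during_call);
	if (pselect(0, NULL, NULL, NULL, NULL, &during_call) < 0)
		perror("pselect");	/* EINTR once SIGUSR1 arrives */
	return 0;
}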
Exemple #27
0
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	enum ctx_state prev_state = exception_enter();
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif
		
	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc   = (regs->tnpc & 0xffffffff);
		regs->tnpc  = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc   = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs)) + 16,
				fpregs + 16, (sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;
out:
	exception_exit(prev_state);
	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
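The user-visible semantics here mirror getcontext(3): the current signal mask and register state are snapshotted into a ucontext_t that can later be resumed. Whether a given libc routes getcontext() through this exact entry point is arch- and version-specific; a generic userspace sketch:

#include <stdio.h>
#include <ucontext.h>

int main(void)
{
	ucontext_t uc;
	volatile int passes = 0;	/* must be volatile across the jump */

	getcontext(&uc);		/* snapshot mask + register state */
	printf("pass %d\n", ++passes);
	if (passes < 2)
		setcontext(&uc);	/* resume at the getcontext() return */
	return 0;
}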
Exemple #28
0
static inline int rcu_cpu(void)
{
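	/* Snapshot of the CPU this task last ran on; only stable while
	 * the caller keeps preemption disabled. */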
	return current_thread_info()->cpu;
}
Exemple #29
0
static inline int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	int wsaved, err, sf_size;
	void __user *tail;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();
	
	wsaved = get_thread_wsaved();

	sf_size = sizeof(struct rt_signal_frame);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		sf_size += sizeof(__siginfo_fpu_t);
	if (wsaved)
		sf_size += sizeof(__siginfo_rwin_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(ksig, regs, sf_size);

	if (invalid_frame_pointer(sf)) {
		do_exit(SIGILL);	/* won't return, actually */
		return -EINVAL;
	}

	tail = (sf + 1);

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		__siginfo_fpu_t __user *fpu_save = tail;
		tail += sizeof(__siginfo_fpu_t);
		err |= save_fpu_state(regs, fpu_save);
		err |= __put_user((u64)fpu_save, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	if (wsaved) {
		__siginfo_rwin_t __user *rwin_save = tail;
		tail += sizeof(__siginfo_rwin_t);
		err |= save_rwin_state(wsaved, rwin_save);
		err |= __put_user((u64)rwin_save, &sf->rwin_save);
		set_thread_wsaved(0);
	} else {
		err |= __put_user(0, &sf->rwin_save);
	}
	
	/* Setup sigaltstack */
	err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]);

	err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t));

	if (!wsaved) {
		err |= copy_in_user((u64 __user *)sf,
				    (u64 __user *)(regs->u_regs[UREG_FP] +
						   STACK_BIAS),
				    sizeof(struct reg_window));
	} else {
		struct reg_window *rp;

		rp = &current_thread_info()->reg_window[wsaved - 1];
		err |= copy_to_user(sf, rp, sizeof(struct reg_window));
	}
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&sf->info, &ksig->info);
	else {
		err |= __put_user(ksig->sig, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		return err;
	
	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = ksig->sig;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 5. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
	return 0;
}
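The sizing-then-carving idiom above (sf_size grows once per optional blob, then tail walks forward from sf + 1) generalizes beyond signal frames. A hypothetical userspace rendering of the same pattern; every name below is invented for illustration:

#include <stdlib.h>

struct demo_fpu  { unsigned long fregs[64]; };
struct demo_rwin { unsigned long locals[8], ins[8]; };
struct demo_hdr  { struct demo_fpu *fpu; struct demo_rwin *rwin; };

static struct demo_hdr *build_frame(int want_fpu, int want_rwin)
{
	size_t sz = sizeof(struct demo_hdr);
	struct demo_hdr *h;
	char *tail;

	if (want_fpu)
		sz += sizeof(struct demo_fpu);
	if (want_rwin)
		sz += sizeof(struct demo_rwin);

	h = calloc(1, sz);
	if (!h)
		return NULL;

	tail = (char *)(h + 1);		/* first byte past the fixed header */
	if (want_fpu) {
		h->fpu = (struct demo_fpu *)tail;
		tail += sizeof(struct demo_fpu);
	}
	if (want_rwin)
		h->rwin = (struct demo_rwin *)tail;
	/* NULL marks an absent blob, like the 0 written to sf->fpu_save
	 * or sf->rwin_save above. */
	return h;
}

int main(void)
{
	struct demo_hdr *h = build_frame(1, 0);
	int ok = h && h->fpu && !h->rwin;

	free(h);
	return ok ? 0 : 1;
}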
Exemple #30
0
static int CVE_2014_0205_linux2_6_30_2_futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct task_struct *curr = current;
	struct restart_block *restart;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) CVE_2014_0205_linux2_6_30_2_futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that CVE_2014_0205_linux2_6_30_2_futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out_put_key;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q.key);
		goto retry;
	}
	ret = -EWOULDBLOCK;
	if (unlikely(uval != val)) {
		queue_unlock(&q, hb);
		goto out_put_key;
	}

	/* Only actually queue if *uaddr contained val.  */
	queue_me(&q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiter, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init_on_stack(&t.timer,
					      clockrt ? CLOCK_REALTIME :
					      CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
						     current->timer_slack_ns);

			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);

			destroy_hrtimer_on_stack(&t.timer);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (rem)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = CVE_2014_0205_linux2_6_30_2_futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = 0;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	return ret;
}
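The compare-and-block contract spelled out in the long comment is exactly what the raw userspace API exposes: FUTEX_WAIT sleeps only if the futex word still holds the expected value. A hedged sketch driving the syscall directly (the wrapper names are invented):

#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int word;	/* the futex word is ordinary memory */

static long fwait(unsigned int *addr, unsigned int val)
{
	/* Blocks only if *addr == val, checked under the bucket lock
	 * exactly as in get_futex_value_locked() above. */
	return syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
}

static long fwake(unsigned int *addr, int nr)
{
	return syscall(SYS_futex, addr, FUTEX_WAKE, nr, NULL, NULL, 0);
}

int main(void)
{
	word = 1;
	/* Fails with EAGAIN/EWOULDBLOCK at once: *addr != val, the same
	 * case the `uval != val` test above handles. */
	if (fwait(&word, 0) < 0)
		perror("FUTEX_WAIT");
	fwake(&word, 1);	/* no waiters yet, so 0 tasks are woken */
	return 0;
}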