Example #1
int restore_sigcontext(struct pt_regs *regs,
		       struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Enforce that sigcontext is like pt_regs, and doesn't mess
	 * up our stack alignment rules.
	 */
	BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
	BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);

	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __get_user(regs->regs[i], &sc->gregs[i]);

	/* Ensure that the PL is always set to USER_PL. */
	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));

	regs->faultnum = INT_SWINT_1_SIGRETURN;

	return err;
}
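The examples below call a companion setup_sigcontext() when building the signal frame; a minimal sketch of that write-side counterpart, assuming sigcontext keeps a gregs[] array laid out exactly like pt_regs (the body is an assumption, mirroring the loop above):

static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
	int i, err = 0;

	/* Copy each general register out to the user-space sigcontext. */
	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __put_user(regs->regs[i], &sc->gregs[i]);

	return err;
}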
Example #2
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	unsigned long restorer;
	struct rt_sigframe __user *frame;
	int err = 0, sig = ksig->sig;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto err;

	/* Always write at least the signal number for the stack backtracer. */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto err;

	restorer = VDSO_SYM(&__vdso_rt_sigreturn);
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = (unsigned long) ksig->ka.sa.sa_restorer;

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = (unsigned long) frame;
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) sig;
	regs->regs[1] = (unsigned long) &frame->info;
	regs->regs[2] = (unsigned long) &frame->uc;
	regs->flags |= PT_FLAGS_CALLER_SAVES;
	return 0;

err:
	trace_unhandled_signal("bad sigreturn frame", regs,
			      (unsigned long)frame, SIGSEGV);
	return -EFAULT;
}
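The accesses above (frame->save_area, frame->info, frame->uc) imply a frame layout roughly like the sketch below; the field order and the use of C_ABI_SAVE_AREA_SIZE are assumptions, not taken from the original source:

struct rt_sigframe {
	unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller-save area */
	struct siginfo info;
	struct ucontext uc;
};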
Example #3
/* Put registers back to task. */
static void putregs(struct task_struct *child, struct pt_regs *uregs)
{
	struct pt_regs *regs = task_pt_regs(child);

	/* Don't allow overwriting the kernel-internal flags word. */
	uregs->flags = regs->flags;

	/* Only allow setting the ICS bit in the ex1 word. */
	uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));

	*regs = *uregs;
}
/* Put registers back to task. */
static void putregs(struct task_struct *child, struct pt_regs *uregs)
{
	struct pt_regs *regs = task_pt_regs(child);

	/*
	 * Don't allow overwriting the kernel-internal flags word.
	 * But do set PT_FLAGS_RESTORE_REGS so that the kernel will reload
	 * all the callee-saved registers when returning to userspace.
	 */ 
	uregs->flags = regs->flags | PT_FLAGS_RESTORE_REGS;

	/* Only allow setting the ICS bit in the ex1 word. */
	uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));

	*regs = *uregs;
}
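A read-side counterpart, here hypothetically named getregs(), would simply hand the saved pt_regs back to the tracer; a minimal sketch under that assumption:

/* Copy the task's saved registers out for a tracer to inspect. */
static void getregs(struct task_struct *child, struct pt_regs *uregs)
{
	*uregs = *task_pt_regs(child);
}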
int restore_sigcontext(struct pt_regs *regs,
		       struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
	BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);

	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __get_user(regs->regs[i], &sc->gregs[i]);

	/* Ensure that the PL is always set to USER_PL. */
	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));

	regs->faultnum = INT_SWINT_1_SIGRETURN;

	return err;
}
Example #6
int restore_sigcontext(struct pt_regs *regs,
		       struct sigcontext __user *sc)
{
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Enforce that sigcontext is like pt_regs, and doesn't mess
	 * up our stack alignment rules.
	 */
	BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
	BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
	err = __copy_from_user(regs, sc, sizeof(*regs));

	/* Ensure that the PL is always set to USER_PL. */
	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));

	regs->faultnum = INT_SWINT_1_SIGRETURN;

	return err;
}
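restore_sigcontext() is typically reached from the rt_sigreturn syscall, which locates the frame via the user stack pointer, restores the blocked signal mask, and then lets the fixed-up pt_regs flow back to user space; a hedged sketch of such a caller, using the frame layout and helpers seen in the other examples (the details are assumptions):

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *)(regs->sp);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return 0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}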
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (long __user __force *)data;
	unsigned long tmp;
	long ret = -EIO;
	char *childreg;
	struct pt_regs copyregs;
	int ex1_offset;

	switch (request) {

	case PTRACE_PEEKUSR:  /* Read a register from the child's pt_regs. */
		if (addr >= PTREGS_SIZE)
			break;
		childreg = (char *)task_pt_regs(child) + addr;
#ifdef CONFIG_COMPAT
		if (is_compat_task()) {
			if (addr & (sizeof(compat_long_t)-1))
				break;
			ret = put_user(*(compat_long_t *)childreg,
				       (compat_long_t __user *)datap);
		} else
#endif
		{
			if (addr & (sizeof(long)-1))
				break;
			ret = put_user(*(long *)childreg, datap);
		}
		break;

	case PTRACE_POKEUSR:  /* Write a register in the child's pt_regs. */
		if (addr >= PTREGS_SIZE)
			break;
		childreg = (char *)task_pt_regs(child) + addr;

		/* Ensure any write to the ex1 word keeps the PL at USER_PL. */
		ex1_offset = PTREGS_OFFSET_EX1;
#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
		if (is_compat_task())   /* 32-bit writes target the low word */
			ex1_offset += sizeof(compat_long_t);
#endif
		if (addr == ex1_offset)
			data = PL_ICS_EX1(USER_PL, EX1_ICS(data));

#ifdef CONFIG_COMPAT
		if (is_compat_task()) {
			if (addr & (sizeof(compat_long_t)-1))
				break;
			*(compat_long_t *)childreg = data;
		} else
#endif
		{
			if (addr & (sizeof(long)-1))
				break;
			*(long *)childreg = data;
		}
		ret = 0;
		break;

	case PTRACE_GETREGS:  /* Get all registers from the child. */
		if (copy_to_user(datap, task_pt_regs(child),
				 sizeof(struct pt_regs)) == 0) {
			ret = 0;
		}
		break;

	case PTRACE_SETREGS:  /* Set all registers in the child. */
		if (copy_from_user(&copyregs, datap,
				   sizeof(struct pt_regs)) == 0) {
			copyregs.ex1 =
				PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
			*task_pt_regs(child) = copyregs;
			ret = 0;
		}
		break;

	case PTRACE_GETFPREGS:  /* Get the child FPU state; not supported. */
	case PTRACE_SETFPREGS:  /* Set the child FPU state; not supported. */
		break;

	case PTRACE_SETOPTIONS:
		/* Support TILE-specific ptrace options. */
		child->ptrace &= ~PT_TRACE_MASK_TILE;
		tmp = data & PTRACE_O_MASK_TILE;
		data &= ~PTRACE_O_MASK_TILE;
		ret = ptrace_request(child, request, addr, data);
		if (tmp & PTRACE_O_TRACEMIGRATE)
			child->ptrace |= PT_TRACE_MIGRATE;
		break;

	default:
#ifdef CONFIG_COMPAT
		if (task_thread_info(current)->status & TS_COMPAT) {
			ret = compat_ptrace_request(child, request,
						    addr, data);
			break;
		}
#endif
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
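From the tracer side, PTRACE_PEEKUSR/PTRACE_POKEUSR take a byte offset into that register area. A small self-contained user-space fragment (the helper name and offset handling are illustrative only):

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/*
 * Read one word of a stopped child's register area at a byte offset.
 * PTRACE_PEEKUSER returns the data itself, so errno must be cleared
 * beforehand to tell a legitimate -1 value apart from an error.
 */
long peek_user_reg(pid_t pid, unsigned long offset)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid, (void *)offset, NULL);
	if (errno != 0)
		return -1;	/* caller must also check errno */
	return val;
}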
Example #8
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)(current->sas_ss_sp),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = (unsigned long) ka->sa.sa_restorer;

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = (unsigned long) frame;
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;
	regs->regs[1] = (unsigned long) &frame->info;
	regs->regs[2] = (unsigned long) &frame->uc;
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct compat_rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = compat_get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user32(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = ptr_to_compat_reg(frame);
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;
	regs->regs[1] = ptr_to_compat_reg(&frame->info);
	regs->regs[2] = ptr_to_compat_reg(&frame->uc);
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	signal_fault("bad setup frame", regs, frame, sig);
	return -EFAULT;
}
Example #10
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		   pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}
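The intvec_state returned here carries five values; a plausible definition inferred from the initializer and the state.retval assignment above (field names other than retval are assumptions):

struct intvec_state {
	void *handler;		/* C-level fault handler, e.g. do_page_fault */
	unsigned long vecnum;	/* interrupt/fault vector number */
	unsigned long address;	/* faulting address */
	unsigned long info;	/* extra info, here the write flag */
	unsigned long retval;	/* 1 = fully handled here, 0 = continue */
};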
Example #11
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = regs->regs[4];

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * This way when we start the interrupt-return sequence, the
	 * callee-save registers will be correctly in registers, which
	 * is how we assume the compiler leaves them as we start doing
	 * the normal return-from-interrupt path after calling C code.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	p->thread.hardwall = NULL;
#endif


	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}
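Putting the pointer arithmetic at the end of copy_thread() together, the child's kernel stack ends up laid out roughly as sketched below (a reconstruction from the code above, not taken from the source):

/*
 *   higher addresses
 *   +--------------------------------+
 *   | struct pt_regs (childregs)     |  parent regs, r0 = 0, sp patched
 *   +--------------------------------+
 *   | C ABI save area (zeroed)       |  interrupt-entry save area
 *   +--------------------------------+
 *   | callee-saved register copy     |  CALLEE_SAVED_REGS_COUNT longs
 *   +--------------------------------+
 *   | C ABI save area (zeroed)       |  __switch_to() save area
 *   +--------------------------------+  <- p->thread.ksp
 *   lower addresses
 */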