Example #1
asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs)
{
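	/* __regs is really the saved-register area on the kernel stack; RELOC_HIDE()
	   keeps the compiler from treating it as an ordinary by-value argument. */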
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
	sigset_t set;
	stack_t st;
	int r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);	/* SIGKILL and SIGSTOP can never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);

	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
Example #2
asmlinkage void
do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
	     struct switch_stack *sw)
{
	sigset_t set;

	/* Verify that it's a good sigcontext before using it */
	if (verify_area(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;
	if (__get_user(set.sig[0], &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, regs, sw))
		goto give_sigsegv;

	/* Send SIGTRAP if we're single-stepping: */
	if (ptrace_cancel_bpt (current)) {
		siginfo_t info;

		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
	}
	return;

give_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #3
asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
	sigset_t set;
	stack_t st;

	/*
	 * Since we stacked the signal on a dword boundary,
	 * then frame should be dword aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (((long)frame) & 3)
		goto badframe;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&st, NULL, regs->sp);

	return regs->gpr[11];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
Example #4
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
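	/* Point $29 (the stack pointer) at the saved registers and jump straight
	   to syscall_exit; control never returns to this C function. */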
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
Example #5
asmlinkage void
do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
		struct switch_stack *sw)
{
	sigset_t set;

	/* Verify that it's a good ucontext_t before using it */
	if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc)))
		goto give_sigsegv;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
		goto give_sigsegv;

	/* Send SIGTRAP if we're single-stepping: */
	if (ptrace_cancel_bpt (current)) {
		siginfo_t info;

		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
	}
	return;

give_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #6
asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13,
                                long mof, long srp, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe *)rdusp();
	sigset_t set;

        /*
         * Since we stacked the signal on a dword boundary,
         * then frame should be dword aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (((long)frame) & 3)
                goto badframe;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, rdusp()) == -EFAULT)
		goto badframe;

	return regs->r10;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
Example #7
asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
	sigset_t set;
	int r0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
			   regs->regs[15]) == -EFAULT)
		goto badframe;

	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
Example #8
void sys32_sigreturn(struct pt_regs *regs)
{
	struct sigframe *frame;
	sigset_t blocked;

	frame = (struct sigframe *) MIPS_sp(regs);
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext32(regs, &frame->sf_sc))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
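	/* The return path through syscall_exit is stubbed out below, so reaching
	   this point is currently treated as a kernel bug. */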
	BUG();
//XXX	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
//		do_syscall_trace();
//	__asm__ __volatile__(
//		"move\t$29, %0\n\t"
//		"j\tsyscall_exit"
//		:/* no outputs */
//		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
Example #9
asmlinkage int
sys_rt_sigreturn(unsigned long r0, unsigned long r1,
		 unsigned long r2, unsigned long r3, unsigned long r4,
		 unsigned long r5, unsigned long r6, struct pt_regs regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.spu;
	sigset_t set;
	stack_t st;
	int result;

	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &result))
		goto badframe;

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&st, NULL, regs.spu);

	return result;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
Example #10
asmlinkage int
sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		 unsigned long r6, unsigned long r7, unsigned long r8,
		 struct pt_regs *regs)
{
	struct ucontext *uc = (struct ucontext *)regs->gpr[1];
	sigset_t set;
	stack_t st;

	if (verify_area(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	if (restore_sigcontext(regs, NULL, &uc->uc_mcontext))
		goto badframe;

	if (__copy_from_user(&st, &uc->uc_stack, sizeof(st)))
		goto badframe;

	/* This function copies the stack flags back into
	   the current task structure.  */
	sys_sigaltstack(&st, NULL, 0, 0, 0, 0, regs);

	return regs->result;

badframe:
	do_exit(SIGSEGV);
}
Example #11
static int
efab_signal_do_sigaction(int sig, struct sigaction *act,
                         struct sigaction *oact,
                         struct mm_signal_data *tramp_data,
                         int *out_pass_to_kernel)
{
  int rc = 0;

  if( !valid_signal(sig) || sig < 1 || (act != NULL && sig_kernel_only(sig)) )
    return -EINVAL;


  if( oact != NULL ) {
    rc = efab_signal_report_sigaction(sig, oact, tramp_data);
    if( rc != 0 )
      return rc;
  }

  if( act != NULL ) {
    sigdelsetmask(&act->sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

  /* If the signal is ignored now, we should ignore all already-pending
   * signals.  Instead of doing it, pass this to OS. */
    if( act->sa_handler == SIG_IGN ||
        (act->sa_handler == SIG_DFL && sig_kernel_ignore(sig)) )
      *out_pass_to_kernel = 1;
    else if( act->sa_flags & SA_ONSTACK && !tramp_data->sa_onstack_intercept )
      *out_pass_to_kernel = 1;
    else
      rc = efab_signal_substitute(sig, act, tramp_data);
  }
  else
    efab_signal_recheck(sig, tramp_data);

  return rc;
}
Example #12
asmlinkage void
do_rt_sigreturn(struct rt_sigframe *frame, struct pt_regs *regs,
                struct switch_stack *sw)
{
    sigset_t set;
    stack_t st;

    /* Verify that it's a good sigcontext before using it */
    if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
        goto give_sigsegv;
    if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
        goto give_sigsegv;

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sigmask_lock);
    current->blocked = set;
    recalc_sigpending(current);
    spin_unlock_irq(&current->sigmask_lock);

    if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
        goto give_sigsegv;

    if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
        goto give_sigsegv;
    /* It is more difficult to avoid calling this function than to
       call it and ignore errors.  */
    do_sigaltstack(&st, NULL, rdusp());

    /* Send SIGTRAP if we're single-stepping: */
    if (ptrace_cancel_bpt (current))
        send_sig(SIGTRAP, current, 1);
    return;

give_sigsegv:
    force_sig(SIGSEGV, current);
}
Example #13
__attribute_used__ noinline static void
_sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe *frame;
	sigset_t blocked;

	frame = (struct sigframe *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(&regs, &frame->sf_sc))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
		do_syscall_trace(&regs, 1);
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
Example #14
static_unused int _sys_sigsuspend(struct pt_regs regs)
{
    sigset_t *uset, saveset, newset;

    uset = (sigset_t *) regs.regs[4];
    if (copy_from_user(&newset, uset, sizeof(sigset_t)))
        return -EFAULT;
    sigdelsetmask(&newset, ~_BLOCKABLE);

    spin_lock_irq(&current->sighand->siglock);
    saveset = current->blocked;
    current->blocked = newset;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    regs.regs[2] = EINTR;	/* $v0: error value returned to userspace */
    regs.regs[7] = 1;		/* $a3: flag the syscall result as an error */
    while (1) {
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        if (do_signal(&saveset, &regs))
            return -EINTR;
    }
}
Example #15
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long eax;

	frame = (struct rt_sigframe __user *)(regs->rsp - 8);
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
		goto badframe;
	} 
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { 
		goto badframe;
	} 

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	
	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
		goto badframe;

#ifdef DEBUG_SIG
	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs.rip,regs.rsp,frame,eax);
#endif

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
		goto badframe;

	return eax;

badframe:
	signal_fault(regs,frame,"sigreturn");
	return 0;
}	
Example #16
asmlinkage int sys_sigreturn(unsigned long __unused)
{
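	/* On i386 the saved pt_regs and the syscall arguments occupy the same spot
	   on the kernel stack, so the address of the dummy argument is the address
	   of the saved registers. */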
	struct pt_regs *regs = (struct pt_regs *) &__unused;
	struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
	sigset_t set;
	int eax;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	
	if (restore_sigcontext(regs, &frame->sc, &eax))
		goto badframe;
	return eax;

badframe:
	if (show_unhandled_signals && printk_ratelimit())
		printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
		       " esp:%lx oeax:%lx\n",
		    current->pid > 1 ? KERN_INFO : KERN_EMERG,
		    current->comm, current->pid, frame, regs->eip,
		    regs->esp, regs->orig_eax);

	force_sig(SIGSEGV, current);
	return 0;
}	
Example #17
/*
 * Fairly generic implementation for sending a packet
 * should work for tcp, udp and raw connections.
 */
static unsigned int privSocketSend(void *pPtr,
                                   struct iovec *pVec,
                                   unsigned int uVecLength,
                                   unsigned int uDataLength,
                                   void *pName,
                                   unsigned int uNameLength)
{
  raw_socket *pRaw = (raw_socket *)pPtr;
  unsigned long ulFlags;
  sigset_t sigSet;
  siginfo_t sigInfo;
  struct msghdr mHdr;
  unsigned int uSendLength = uDataLength;
  unsigned int uVecCurrent = 0;
  int iRetries;
  int iError;
  unsigned int rvalue = 0;

  if (NULL != pRaw) {
    /* allow sigkill */
    spin_lock_irqsave(&current->sighand->siglock, ulFlags);
    sigSet = current->blocked;
    sigfillset(&current->blocked);
    sigdelsetmask(&current->blocked, sigmask(SIGKILL));
    recalc_sigpending();
    spin_unlock_irqrestore(&current->sighand->siglock, ulFlags);

    mHdr.msg_control = NULL;
    mHdr.msg_controllen = 0;
    mHdr.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;

    /* sockaddr stuff */
    mHdr.msg_name = pName;
    mHdr.msg_namelen = uNameLength;

    while ((true == pRaw->bConnected) && (uSendLength > 0)) {

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))
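      /* Kernels before 3.19 still carry the iovec directly in struct msghdr;
         newer kernels take a kvec via kernel_sendmsg() in the #else branch. */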
      mHdr.msg_iov = &pVec [uVecCurrent];
      mHdr.msg_iovlen = uVecLength - uVecCurrent;
#endif
      iRetries = 0;
      iError = 0;
      while ((iRetries++ < MAX_RETRIES) && (true == pRaw->bConnected)) {
        /* Finally, the kernel does its magic. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))
        iError = sock_sendmsg(pRaw->pSocket,
                              &mHdr,
                              uSendLength);
#else
        iError = kernel_sendmsg(pRaw->pSocket,
				&mHdr,
				(struct kvec *)&pVec [uVecCurrent],
				uVecLength - uVecCurrent,
				uSendLength);
#endif
        if (signal_pending(current)) {
          /* dequeue the SIGKILL and keep quiet. */
          spin_lock_irqsave(&current->sighand->siglock, ulFlags);
          dequeue_signal(current, &current->blocked, &sigInfo);
          spin_unlock_irqrestore(&current->sighand->siglock, ulFlags);
          break;
        }

        switch(iError)
          {
          case -EAGAIN:
          case -ENOSPC:
            /* For these errors, let's sleep and then try again. */
            msleep_interruptible(32 << (iRetries % 4));
            break;
          case 0:
            /* Generic TCP has issues, copied from cifs */
            KLEM_MSG("Recv TCP size issue\n");
            msleep_interruptible(500);
            break;
          default:
            /* must have gotten something more interesting, don't try again */
            iRetries = MAX_RETRIES;
            break;
        }
      }

      /* Did we send any data?  Consider simplifying this. */
      if (iError > 0) {
        if (iError >= uSendLength) {
          /* All sent, full write */
          uSendLength -= iError;
          rvalue += iError;
        } else {
          /* fix that partial write */
          while ((uVecCurrent < uVecLength) && (iError > 0)) {
            if (iError >= pVec [uVecCurrent].iov_len) {
              /* We have consumed an entire iov */
              uSendLength -= pVec [uVecCurrent].iov_len;
              iError -= pVec [uVecCurrent].iov_len;
              rvalue += pVec [uVecCurrent].iov_len;
              uVecCurrent += 1;
            } else {
              /* Partial iov consumed case */
              pVec [uVecCurrent].iov_len -= iError;
              pVec [uVecCurrent].iov_base += iError;
              uSendLength -= iError;
              rvalue += iError;
              iError = 0;
            }
          }
        }
      } else {
        /* No data was sent, this is an error. */
        KLEM_LOG("send error %d\n", iError);
        rvalue = 0;
        uSendLength = 0;
      }
    }

    /* no more sigkill. */
    spin_lock_irqsave(&current->sighand->siglock, ulFlags);
    current->blocked = sigSet;
    recalc_sigpending();
    spin_unlock_irqrestore(&current->sighand->siglock, ulFlags);
  }

  return rvalue;
}
Example #18
void restore_sigmask(sigset_t *set)
{
	sigdelsetmask(set, ~_BLOCKABLE);
	set_current_blocked(set);
}
Example #19
int ckpt_restore_signals(ckpt_desc_t desc)
{
    int i;
    int ret;
    stack_t sigstack;
    sigset_t sigblocked;

    log_restore_signals("restoring sigstack ...");
    if (ckpt_read(desc, &sigstack, sizeof(stack_t)) != sizeof(stack_t)) {
        log_err("failed to get sigstack");
        return -EIO;
    }

    ret = compat_sigaltstack(current, &sigstack, NULL, 0);
    if (ret) {
        log_err("failed to restore sigstack (ret=%d)", ret);
        return ret;
    }

    log_restore_signals("restoring sigblocked ...");
    if (ckpt_read(desc, &sigblocked, sizeof(sigset_t)) != sizeof(sigset_t)) {
        log_err("failed to restore sigstack");
        return -EIO;
    }

    sigdelsetmask(&sigblocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = sigblocked;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    log_restore_signals("restoring pending ...");
    ret = ckpt_restore_sigpending(&current->pending, 0, desc);
    if (ret) {
        log_err("failed to restore pending");
        return ret;
    }

    ret = ckpt_restore_sigpending(&current->signal->shared_pending, 1, desc);
    if (ret) {
        log_err("failed to restore shared_pending");
        return ret;
    }

    log_restore_signals("restoring sigaction ...");
    for (i = 0; i < _NSIG; i++) {
        struct k_sigaction sigaction;

        if (ckpt_read(desc, &sigaction, sizeof(struct k_sigaction)) != sizeof(struct k_sigaction)) {
            log_err("failed to get sigaction");
            return -EIO;
        }

        if ((i != SIGKILL - 1) && (i != SIGSTOP - 1)) {
            ret = do_sigaction(i + 1, &sigaction, 0);
            if (ret) {
                log_err("failed to restore sigaction (ret=%d)", ret);
                return ret;
            }
        }
    }
    log_restore_pos(desc);
    return 0;
}
Example #20
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc,  &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __get_user(rwin_save, &sf->rwin_save);
	if (rwin_save)
		err |= restore_rwin_state(rwin_save);

	/* This is pretty much atomic; no amount of locking would prevent
	 * the races that exist anyway.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
			        (_NSIG_WORDS-1) * sizeof(unsigned int));
			   
	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
Example #21
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned int psr, pc, npc;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 0x03))
		goto segv;

	err = __get_user(pc, &sf->regs.pc);
	err |= __get_user(npc, &sf->regs.npc);
	err |= ((pc | npc) & 0x03);

	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	err |= __copy_from_user(&regs->u_regs[UREG_G1],
				&sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

	regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	
	if (err)
		goto segv;
		
	regs->pc = pc;
	regs->npc = npc;
	
	/* It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
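	/* 'st' now lives on the kernel stack, so temporarily lift the user
	 * address-space limit for do_sigaltstack()'s access checks.
	 */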
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(rwin_save))
			goto segv;
	}

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
Example #22
int sys32_rt_sigtimedwait(compat_sigset_t *uthese,
	siginfo_t32 *uinfo, struct compat_timespec *uts,
	compat_time_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	compat_sigset_t these32;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/*
	 * As the result of a brainfarting competition a few years ago the
	 * size of sigset_t for the 32-bit kernel was chosen to be 128 bits,
	 * but nothing so far is actually using that many; 64 are enough.  So
	 * for now we just drop the high bits.
	 */
	if (copy_from_user (&these32, uthese, sizeof(compat_old_sigset_t)))
		return -EFAULT;

	switch (_NSIG_WORDS) {
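	/* Each case falls through: the 64-bit sigset words are assembled from
	   pairs of 32-bit halves, with the order depending on endianness. */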
#ifdef __MIPSEB__
	case 4: these.sig[3] = these32.sig[6] | (((long)these32.sig[7]) << 32);
	case 3: these.sig[2] = these32.sig[4] | (((long)these32.sig[5]) << 32);
	case 2: these.sig[1] = these32.sig[2] | (((long)these32.sig[3]) << 32);
	case 1: these.sig[0] = these32.sig[0] | (((long)these32.sig[1]) << 32);
#endif
#ifdef __MIPSEL__
	case 4: these.sig[3] = these32.sig[7] | (((long)these32.sig[6]) << 32);
	case 3: these.sig[2] = these32.sig[5] | (((long)these32.sig[4]) << 32);
	case 2: these.sig[1] = these32.sig[3] | (((long)these32.sig[2]) << 32);
	case 1: these.sig[0] = these32.sig[1] | (((long)these32.sig[0]) << 32);
#endif
	}

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (get_user (ts.tv_sec, &uts->tv_sec) ||
		    get_user (ts.tv_nsec, &uts->tv_nsec))
			return -EINVAL;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		/* None ready -- temporarily unblock those we're interested
		   in so that we'll be awakened when they arrive.  */
		sigset_t oldblocked = current->blocked;
		sigandsets(&current->blocked, &current->blocked, &these);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		current->state = TASK_INTERRUPTIBLE;
		timeout = schedule_timeout(timeout);

		spin_lock_irq(&current->sighand->siglock);
		sig = dequeue_signal(current, &these, &info);
		current->blocked = oldblocked;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
Example #23
/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7, unsigned long r8,
		   struct pt_regs *regs)
{
	struct sigcontext_struct *sc, sigctx;
	struct sigregs *sr;
	long ret;
	elf_gregset_t saved_regs;  /* an array of ELF_NGREG unsigned longs */
	sigset_t set;
	unsigned long prevsp;

        sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

	set.sig[0] = sigctx.oldmask;
#if _NSIG_WORDS > 1
	set.sig[1] = sigctx._unused[3];
#endif
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	sc++;			/* Look at next sigcontext */
	if (sc == (struct sigcontext_struct *)(sigctx.regs)) {
		/* Last stacked signal - restore registers */
		sr = (struct sigregs *) sigctx.regs;
		if (regs->msr & MSR_FP )
			giveup_fpu(current);
		if (copy_from_user(saved_regs, &sr->gp_regs,
				   sizeof(sr->gp_regs)))
			goto badframe;
		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
		saved_regs[PT_SOFTE] = regs->softe;
		memcpy(regs, saved_regs, GP_REGS_SIZE);

		if (copy_from_user(current->thread.fpr, &sr->fp_regs,
				   sizeof(sr->fp_regs)))
			goto badframe;

		ret = regs->result;

	} else {
		/* More signals to go */
		regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE;
		if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
			goto badframe;
		sr = (struct sigregs *) sigctx.regs;
		regs->gpr[3] = ret = sigctx.signal;
		regs->gpr[4] = (unsigned long) sc;
		regs->link = (unsigned long) &sr->tramp;
		regs->nip = sigctx.handler;

		if (get_user(prevsp, &sr->gp_regs[PT_R1])
		    || put_user(prevsp, (unsigned long *) regs->gpr[1]))
			goto badframe;
	}
	return ret;

badframe:
	do_exit(SIGSEGV);
}	
Example #24
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	
	if (err)
		goto segv;
		
	regs->tpc = tpc;
	regs->tnpc = tnpc;
	
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
Example #25
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
	extern char ia64_strace_leave_kernel, ia64_leave_kernel;
	struct sigcontext __user *sc;
	struct siginfo si;
	sigset_t set;
	long retval;

	sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;

	/*
	 * When we return to the previously executing context, r8 and r10 have already
	 * been set up the way we want them.  Indeed, if the signal wasn't delivered while
	 * in a system call, we must not touch r8 or r10 as otherwise user-level state
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    || test_thread_flag(TIF_SYSCALL_AUDIT))
		/*
		 * strace expects to be notified after sigreturn returns even though the
		 * context to which we return may not be in the middle of a syscall.
		 * Thus, the return-value that strace displays for sigreturn is
		 * meaningless.
		 */
		retval = (long) &ia64_strace_leave_kernel;

	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;

	if (GET_SIGSET(&set, &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, scr))
		goto give_sigsegv;

#if DEBUG_SIG
	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
	/*
	 * It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);
	return retval;

  give_sigsegv:
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = task_pid_vnr(current);
	si.si_uid = current_uid();
	si.si_addr = sc;
	force_sig_info(SIGSEGV, &si, current);
	return retval;
}
Example #26
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes in %tstate. */
	regs->tstate &= ~(TSTATE_ICC);
	regs->tstate |= (tstate & TSTATE_ICC);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	
	if (err)
		goto segv;
		
	regs->tpc = tpc;
	regs->tnpc = tnpc;
	
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack(&st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
	return;
segv:
	send_sig(SIGSEGV, current, 1);
}
Example #27
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe *rt_sf;
	struct sigcontext_struct sigctx;
	struct sigregs *sr;
	int ret;
	elf_gregset_t saved_regs;  /* an array of ELF_NGREG unsigned longs */
	sigset_t set;
	stack_t st;
	unsigned long prevsp;

	rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
	    || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set))
	    || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st)))
		goto badframe;
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	rt_sf++;			/* Look at next rt_sigframe */
	if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) {
		/* Last stacked signal - restore registers -
		 * sigctx is initialized to point to the 
		 * preamble frame (where registers are stored) 
		 * see handle_signal()
		 */
		sr = (struct sigregs *) sigctx.regs;
		if (regs->msr & MSR_FP )
			giveup_fpu(current);
		if (copy_from_user(saved_regs, &sr->gp_regs,
				   sizeof(sr->gp_regs)))
			goto badframe;
		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
		saved_regs[PT_SOFTE] = regs->softe;
		memcpy(regs, saved_regs, GP_REGS_SIZE);
		if (copy_from_user(current->thread.fpr, &sr->fp_regs,
				   sizeof(sr->fp_regs)))
			goto badframe;
		/* This function copies the stack flags back into
		   the current task structure.  */
		sys_sigaltstack(&st, NULL);

		ret = regs->result;
	} else {
		/* More signals to go */
		/* Set up registers for next signal handler */
		regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE;
		if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)))
			goto badframe;
		sr = (struct sigregs *) sigctx.regs;
		regs->gpr[3] = ret = sigctx.signal;
		/* Get the siginfo   */
		get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo);
		/* Get the ucontext */
		get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc);
		regs->gpr[6] = (unsigned long) rt_sf;

		regs->link = (unsigned long) &sr->tramp;
		regs->nip = sigctx.handler;
		if (get_user(prevsp, &sr->gp_regs[PT_R1])
		    || put_user(prevsp, (unsigned long *) regs->gpr[1]))
			goto badframe;
	}
	return ret;

badframe:
	do_exit(SIGSEGV);
}
Example #28
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if (get_thread_wsaved()					||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp  = &ucp->uc_mcontext.mc_gregs;
	err  = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
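	/* A non-zero second argument asks us to restore the saved signal mask too. */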
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #29
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
		goto segv;

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(rwin_save))
			goto segv;
	}

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);
	return;
segv:
	force_sig(SIGSEGV, current);
}
Example #30
asmlinkage void
irix_sigreturn(struct pt_regs *regs)
{
	struct sigctx_irix5 __user *context, *magic;
	unsigned long umask, mask;
	u64 *fregs;
	u32 usedfp;
	int error, sig, i, base = 0;
	sigset_t blocked;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (regs->regs[2] == 1000)
		base = 1;

	context = (struct sigctx_irix5 __user *) regs->regs[base + 4];
	magic = (struct sigctx_irix5 __user *) regs->regs[base + 5];
	sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
	printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
	       current->comm, current->pid, context, magic, sig);
#endif
	if (!context)
		context = magic;
	if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
		goto badframe;

#ifdef DEBUG_SIG
	dump_irix5_sigctx(context);
#endif

	error = __get_user(regs->cp0_epc, &context->pc);
	error |= __get_user(umask, &context->rmask);

	mask = 2;
	for (i = 1; i < 32; i++, mask <<= 1) {
		if (umask & mask)
			error |= __get_user(regs->regs[i], &context->regs[i]);
	}
	error |= __get_user(regs->hi, &context->hi);
	error |= __get_user(regs->lo, &context->lo);

	error |= __get_user(usedfp, &context->usedfp);
	if ((umask & 1) && usedfp) {
		fregs = (u64 *) &current->thread.fpu;

		for(i = 0; i < 32; i++)
			error |= __get_user(fregs[i], &context->fpregs[i]);
		error |= __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
	}

	/* XXX do sigstack crapola here... XXX */

	error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0;

	if (error)
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Don't let your children do this ...
	 */
	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
		do_syscall_trace(regs, 1);
	__asm__ __volatile__(
		"move\t$29,%0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
		/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}