unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->_eax, (u32 *)regs->rsp)) )
        goto exit_and_crash;

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->_eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
        goto exit_and_crash;

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
        goto exit_and_crash;
    regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not on
         * the IRET (which is why we can simply leave the stack frame as-is
         * (except for perhaps having to copy it), which in turn seems better
         * than teaching create_bounce_frame() to needlessly deal with vm86
         * mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->_esp, ksp);
        if ( ksp < regs->_esp )
        {
            for ( i = 1; i < 10; ++i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->_esp )
        {
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
            goto exit_and_crash;
        regs->_esp = ksp;
        regs->ss = v->arch.pv_vcpu.kernel_ss;

        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                           X86_EFLAGS_NT|X86_EFLAGS_TF);
        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
            goto exit_and_crash;
        regs->_eip = ti->address;
        regs->cs = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
        goto exit_and_crash;
    else if ( !ring_1(regs) )
    {
        /* Return to ring 2/3: restore ESP and SS. */
        if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
             __get_user(regs->_esp, (u32 *)regs->rsp + 4) )
            goto exit_and_crash;
    }
    else
        regs->_esp += 16;

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->_eax;

 exit_and_crash:
    gprintk(XENLOG_ERR, "Fatal IRET error\n");
    domain_crash(v->domain);
    return 0;
}
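/*
 * Editorial sketch, not part of the Xen source above: the VM86 branch of
 * compat_iret() copies a 9-word frame between two possibly overlapping stack
 * regions, choosing the copy direction from the relative position of source
 * and destination. Copying low-to-high is safe when the destination lies
 * below the source, and high-to-low when it lies above -- the same rule
 * memmove() follows. A minimal in-memory analogue:
 */
#include <stddef.h>

static void copy_words_overlap_safe(unsigned int *dst, const unsigned int *src,
                                    size_t n)
{
    size_t i;

    if (dst < src)
        for (i = 0; i < n; i++)      /* forward copy: never clobbers unread words */
            dst[i] = src[i];
    else if (dst > src)
        for (i = n; i-- > 0; )       /* backward copy for the other overlap case */
            dst[i] = src[i];
}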
/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
                    struct ucontext __user *new_ctx,
                    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
    unsigned char tmp;
    sigset_t set;
    unsigned long new_msr = 0;
    int ctx_has_vsx_region = 0;

    if (new_ctx &&
        get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
        return -EFAULT;
    /*
     * Check that the context is not smaller than the original
     * size (with VMX but without VSX)
     */
    if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
        return -EINVAL;
    /*
     * If the new context state sets the MSR VSX bits but
     * it doesn't provide VSX state, fail with -EINVAL.
     */
    if ((ctx_size < sizeof(struct ucontext)) &&
        (new_msr & MSR_VSX))
        return -EINVAL;
    /* Does the context have enough room to store VSX data? */
    if (ctx_size >= sizeof(struct ucontext))
        ctx_has_vsx_region = 1;

    if (old_ctx != NULL) {
        if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
            || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
                                ctx_has_vsx_region)
            || __copy_to_user(&old_ctx->uc_sigmask,
                              &current->blocked, sizeof(sigset_t)))
            return -EFAULT;
    }
    if (new_ctx == NULL)
        return 0;
    if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
        || __get_user(tmp, (u8 __user *) new_ctx)
        || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
        return -EFAULT;

    /*
     * If we get a fault copying the context into the kernel's
     * image of the user's registers, we can't just return -EFAULT
     * because the user's registers will be corrupted.  For instance
     * the NIP value may have been updated but not some of the
     * other registers.  Given that we have done the access_ok
     * and successfully read the first and last bytes of the region
     * above, this should only happen in an out-of-memory situation
     * or if another thread unmaps the region containing the context.
     * We kill the task with a SIGSEGV in this situation.
     */
    if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
        do_exit(SIGSEGV);
    set_current_blocked(&set);
    if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
        do_exit(SIGSEGV);

    /* This returns like rt_sigreturn */
    set_thread_flag(TIF_RESTOREALL);
    return 0;
}
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
    int i, ret;
    unsigned long __user *p = (void __user *)data;
    void __user *vp = p;

    switch (request) {
    /* read the word at location addr in the USER area. */
    case PTRACE_PEEKUSR:
        ret = peek_user(child, addr, data);
        break;

    /* write the word at location addr in the USER area */
    case PTRACE_POKEUSR:
        ret = poke_user(child, addr, data);
        break;

    case PTRACE_SYSEMU:
    case PTRACE_SYSEMU_SINGLESTEP:
        ret = -EIO;
        break;

#ifdef PTRACE_GETREGS
    case PTRACE_GETREGS: { /* Get all gp regs from the child. */
        if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __put_user(getreg(child, i), p);
            p++;
        }
        ret = 0;
        break;
    }
#endif
#ifdef PTRACE_SETREGS
    case PTRACE_SETREGS: { /* Set all gp regs in the child. */
        unsigned long tmp = 0;
        if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __get_user(tmp, p);
            putreg(child, i, tmp);
            p++;
        }
        ret = 0;
        break;
    }
#endif
    case PTRACE_GET_THREAD_AREA:
        ret = ptrace_get_thread_area(child, addr, vp);
        break;

    case PTRACE_SET_THREAD_AREA:
        ret = ptrace_set_thread_area(child, addr, vp);
        break;

    case PTRACE_FAULTINFO: {
        /*
         * Take the info from thread->arch->faultinfo,
         * but transfer max. sizeof(struct ptrace_faultinfo).
         * On i386, ptrace_faultinfo is smaller!
         */
        ret = copy_to_user(p, &child->thread.arch.faultinfo,
                           sizeof(struct ptrace_faultinfo)) ? -EIO : 0;
        break;
    }

#ifdef PTRACE_LDT
    case PTRACE_LDT: {
        struct ptrace_ldt ldt;

        if (copy_from_user(&ldt, p, sizeof(ldt))) {
            ret = -EIO;
            break;
        }

        /*
         * This one is confusing, so just punt and return -EIO for
         * now
         */
        ret = -EIO;
        break;
    }
#endif
    default:
        ret = ptrace_request(child, request, addr, data);
        if (ret == -EIO)
            ret = subarch_ptrace(child, request, addr, data);
        break;
    }

    return ret;
}
/* set the current selection. Invoked by ioctl() or by kernel code. */
int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
    struct vc_data *vc = vc_cons[fg_console].d;
    int sel_mode, new_sel_start, new_sel_end, spc;
    char *bp, *obp;
    int i, ps, pe;

    poke_blanked_console();

    {
        unsigned short xs, ys, xe, ye;

        if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
            return -EFAULT;
        __get_user(xs, &sel->xs);
        __get_user(ys, &sel->ys);
        __get_user(xe, &sel->xe);
        __get_user(ye, &sel->ye);
        __get_user(sel_mode, &sel->sel_mode);
        xs--; ys--; xe--; ye--;
        xs = limit(xs, vc->vc_cols - 1);
        ys = limit(ys, vc->vc_rows - 1);
        xe = limit(xe, vc->vc_cols - 1);
        ye = limit(ye, vc->vc_rows - 1);
        ps = ys * vc->vc_size_row + (xs << 1);
        pe = ye * vc->vc_size_row + (xe << 1);

        if (sel_mode == TIOCL_SELCLEAR) {
            /* useful for screendump without selection highlights */
            clear_selection();
            return 0;
        }

        if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
            mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
            return 0;
        }
    }

    if (ps > pe)    /* make sel_start <= sel_end */
    {
        int tmp = ps;
        ps = pe;
        pe = tmp;
    }

    if (sel_cons != vc_cons[fg_console].d) {
        clear_selection();
        sel_cons = vc_cons[fg_console].d;
    }

    switch (sel_mode)
    {
    case TIOCL_SELCHAR:    /* character-by-character selection */
        new_sel_start = ps;
        new_sel_end = pe;
        break;
    case TIOCL_SELWORD:    /* word-by-word selection */
        spc = isspace(sel_pos(ps));
        for (new_sel_start = ps; ; ps -= 2)
        {
            if ((spc && !isspace(sel_pos(ps))) ||
                (!spc && !inword(sel_pos(ps))))
                break;
            new_sel_start = ps;
            if (!(ps % vc->vc_size_row))
                break;
        }
        spc = isspace(sel_pos(pe));
        for (new_sel_end = pe; ; pe += 2)
        {
            if ((spc && !isspace(sel_pos(pe))) ||
                (!spc && !inword(sel_pos(pe))))
                break;
            new_sel_end = pe;
            if (!((pe + 2) % vc->vc_size_row))
                break;
        }
        break;
    case TIOCL_SELLINE:    /* line-by-line selection */
        new_sel_start = ps - ps % vc->vc_size_row;
        new_sel_end = pe + vc->vc_size_row - pe % vc->vc_size_row - 2;
        break;
    case TIOCL_SELPOINTER:
        highlight_pointer(pe);
        return 0;
    default:
        return -EINVAL;
    }

    /* remove the pointer */
    highlight_pointer(-1);

    /* select to end of line if on trailing space */
    if (new_sel_end > new_sel_start &&
        !atedge(new_sel_end, vc->vc_size_row) &&
        isspace(sel_pos(new_sel_end))) {
        for (pe = new_sel_end + 2; ; pe += 2)
            if (!isspace(sel_pos(pe)) ||
                atedge(pe, vc->vc_size_row))
                break;
        if (isspace(sel_pos(pe)))
            new_sel_end = pe;
    }
    if (sel_start == -1)    /* no current selection */
        highlight(new_sel_start, new_sel_end);
    else if (new_sel_start == sel_start)
    {
        if (new_sel_end == sel_end)    /* no action required */
            return 0;
        else if (new_sel_end > sel_end)    /* extend to right */
            highlight(sel_end + 2, new_sel_end);
        else    /* contract from right */
            highlight(new_sel_end + 2, sel_end);
    }
    else if (new_sel_end == sel_end)
    {
        if (new_sel_start < sel_start)    /* extend to left */
            highlight(new_sel_start, sel_start - 2);
        else    /* contract from left */
            highlight(sel_start, new_sel_start - 2);
    }
    else    /* some other case; start selection from scratch */
    {
        clear_selection();
        highlight(new_sel_start, new_sel_end);
    }
    sel_start = new_sel_start;
    sel_end = new_sel_end;

    /* Allocate a new buffer before freeing the old one ... */
    bp = kmalloc((sel_end - sel_start) / 2 + 1, GFP_KERNEL);
    if (!bp) {
        printk(KERN_WARNING "selection: kmalloc() failed\n");
        clear_selection();
        return -ENOMEM;
    }
    kfree(sel_buffer);
    sel_buffer = bp;

    obp = bp;
    for (i = sel_start; i <= sel_end; i += 2) {
        *bp = sel_pos(i);
        if (!isspace(*bp++))
            obp = bp;
        if (!((i + 2) % vc->vc_size_row)) {
            /* strip trailing blanks from line and add newline,
               unless non-space at end of line. */
            if (obp != bp) {
                bp = obp;
                *bp++ = '\r';
            }
            obp = bp;
        }
    }
    sel_buffer_lth = bp - sel_buffer;
    return 0;
}
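/*
 * Editorial sketch, not from the driver above: the selection code maps a
 * (row, column) screen position to a byte offset in the console buffer at
 * two bytes per character cell, which is where expressions like
 * `ys * vc->vc_size_row + (xs << 1)` come from. In isolation:
 */
static inline int cell_offset(int row, int col, int size_row)
{
    return row * size_row + (col << 1);  /* 2 bytes per cell: char + attribute */
}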
static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                               struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
    elf_vrreg_t __user *v_regs;
#endif
    unsigned long err = 0;
    unsigned long save_r13 = 0;
    unsigned long msr;
#ifdef CONFIG_VSX
    int i;
#endif

    /* If this is not a signal return, we preserve the TLS in r13 */
    if (!sig)
        save_r13 = regs->gpr[13];

    /* copy the GPRs */
    err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
    err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
    /* get MSR separately, transfer the LE bit if doing signal return */
    err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
    if (sig)
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
    err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
    err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
    err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
    err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
    err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
    /* skip SOFTE */
    regs->trap = 0;
    err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
    err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
    err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

    if (!sig)
        regs->gpr[13] = save_r13;
    if (set != NULL)
        err |= __get_user(set->sig[0], &sc->oldmask);

    /*
     * Do this before updating the thread state in
     * current->thread.fpr/vr.  That way, if we get preempted
     * and another task grabs the FPU/Altivec, it won't be
     * tempted to save the current CPU state into the thread_struct
     * and corrupt what we are writing there.
     */
    discard_lazy_cpu_state();

    /*
     * Force reload of FP/VEC.
     * This has to be done before copying stuff into current->thread.fpr/vr
     * for the reasons explained in the previous comment.
     */
    regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
    err |= __get_user(v_regs, &sc->v_regs);
    if (err)
        return err;
    if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
        return -EFAULT;
    /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
    if (v_regs != 0 && (msr & MSR_VEC) != 0)
        err |= __copy_from_user(current->thread.vr, v_regs,
                                33 * sizeof(vector128));
    else if (current->thread.used_vr)
        memset(current->thread.vr, 0, 33 * sizeof(vector128));
    /* Always get VRSAVE back */
    if (v_regs != 0)
        err |= __get_user(current->thread.vrsave,
                          (u32 __user *)&v_regs[33]);
    else
        current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
    /* restore floating point */
    err |= copy_fpr_from_user(current, &sc->fp_regs);
#ifdef CONFIG_VSX
    /*
     * Get additional VSX data. Update v_regs to point after the
     * VMX data.  Copy VSX low doubleword from userspace to local
     * buffer for formatting, then into the taskstruct.
     */
    v_regs += ELF_NVRREG;
    if ((msr & MSR_VSX) != 0)
        err |= copy_vsx_from_user(current, v_regs);
    else
        for (i = 0; i < 32 ; i++)
            current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
    return err;
}
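/*
 * Editorial sketch: restore_sigcontext() takes only the LE bit from the
 * user-supplied MSR with the mask-merge idiom
 * (regs->msr & ~MSR_LE) | (msr & MSR_LE); the sparc functions below use the
 * same idiom for %tstate and %psr. In general form, the idiom takes the
 * `mask` bits from the new value and everything else from the current one:
 */
static inline unsigned long merge_masked_bits(unsigned long cur,
                                              unsigned long newval,
                                              unsigned long mask)
{
    return (cur & ~mask) | (newval & mask);
}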
/*
 * The ioctl() implementation
 */
int scull_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
    int err = 0, tmp;
    int retval = 0;

    /*
     * extract the type and number bitfields, and don't decode
     * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
     */
    if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY;

    /*
     * the direction is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. `Type' is user-oriented, while
     * access_ok is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
    if (err) return -EFAULT;

    switch(cmd) {

    case SCULL_IOCRESET:
        scull_quantum = SCULL_QUANTUM;
        scull_qset = SCULL_QSET;
        break;

    case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        retval = __get_user(scull_quantum, (int __user *)arg);
        break;

    case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        scull_quantum = arg;
        break;

    case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
        retval = __put_user(scull_quantum, (int __user *)arg);
        break;

    case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
        return scull_quantum;

    case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_quantum;
        retval = __get_user(scull_quantum, (int __user *)arg);
        if (retval == 0)
            retval = __put_user(tmp, (int __user *)arg);
        break;

    case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_quantum;
        scull_quantum = arg;
        return tmp;

    case SCULL_IOCSQSET:
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        retval = __get_user(scull_qset, (int __user *)arg);
        break;

    case SCULL_IOCTQSET:
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        scull_qset = arg;
        break;

    case SCULL_IOCGQSET:
        retval = __put_user(scull_qset, (int __user *)arg);
        break;

    case SCULL_IOCQQSET:
        return scull_qset;

    case SCULL_IOCXQSET:
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_qset;
        retval = __get_user(scull_qset, (int __user *)arg);
        if (retval == 0)
            retval = put_user(tmp, (int __user *)arg);
        break;

    case SCULL_IOCHQSET:
        if (! capable (CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_qset;
        scull_qset = arg;
        return tmp;

    /*
     * The following two change the buffer size for scullpipe.
     * The scullpipe device uses this same ioctl method, just to
     * write less code. Actually, it's the same driver, isn't it?
     */
    case SCULL_P_IOCTSIZE:
        scull_p_buffer = arg;
        break;

    case SCULL_P_IOCQSIZE:
        return scull_p_buffer;

    default:  /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return retval;
}
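/*
 * Editorial user-space sketch, not part of the driver above: exercising the
 * quantum ioctls, assuming the SCULL_IOC* macros from scull.h and a
 * /dev/scull0 device node (both assumptions; adjust to the actual build).
 * SCULL_IOCQQUANTUM returns the value directly; SCULL_IOCGQUANTUM writes it
 * through the pointer argument.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scull.h"

int main(void)
{
    int fd = open("/dev/scull0", O_RDWR);
    int quantum;

    if (fd < 0)
        return 1;
    quantum = ioctl(fd, SCULL_IOCQQUANTUM);          /* Query: value in return code */
    printf("quantum (query) = %d\n", quantum);
    if (ioctl(fd, SCULL_IOCGQUANTUM, &quantum) == 0) /* Get: value via pointer */
        printf("quantum (get)   = %d\n", quantum);
    close(fd);
    return 0;
}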
void do_rt_sigreturn(struct pt_regs *regs)
{
    struct rt_signal_frame __user *sf;
    unsigned long tpc, tnpc, tstate;
    __siginfo_fpu_t __user *fpu_save;
    __siginfo_rwin_t __user *rwin_save;
    sigset_t set;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    synchronize_user_stack ();
    sf = (struct rt_signal_frame __user *)
        (regs->u_regs [UREG_FP] + STACK_BIAS);

    /* 1. Make sure we are not getting garbage from the user */
    if (((unsigned long) sf) & 3)
        goto segv;

    err = get_user(tpc, &sf->regs.tpc);
    err |= __get_user(tnpc, &sf->regs.tnpc);
    if (test_thread_flag(TIF_32BIT)) {
        tpc &= 0xffffffff;
        tnpc &= 0xffffffff;
    }
    err |= ((tpc | tnpc) & 3);

    /* 2. Restore the state */
    err |= __get_user(regs->y, &sf->regs.y);
    err |= __get_user(tstate, &sf->regs.tstate);
    err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

    /* User can only change condition codes and %asi in %tstate. */
    regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
    regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

    err |= __get_user(fpu_save, &sf->fpu_save);
    if (!err && fpu_save)
        err |= restore_fpu_state(regs, fpu_save);

    err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
    if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
        goto segv;

    err |= __get_user(rwin_save, &sf->rwin_save);
    if (!err && rwin_save) {
        if (restore_rwin_state(rwin_save))
            goto segv;
    }

    regs->tpc = tpc;
    regs->tnpc = tnpc;

    /* Prevent syscall restart. */
    pt_regs_clear_syscall(regs);

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return;
segv:
    force_sig(SIGSEGV, current);
}
asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
{
    struct timex txc;
    int ret;

    memset(&txc, 0, sizeof(struct timex));

    if (get_user(txc.modes, &utp->modes) ||
        __get_user(txc.offset, &utp->offset) ||
        __get_user(txc.freq, &utp->freq) ||
        __get_user(txc.maxerror, &utp->maxerror) ||
        __get_user(txc.esterror, &utp->esterror) ||
        __get_user(txc.status, &utp->status) ||
        __get_user(txc.constant, &utp->constant) ||
        __get_user(txc.precision, &utp->precision) ||
        __get_user(txc.tolerance, &utp->tolerance) ||
        __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
        __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
        __get_user(txc.tick, &utp->tick) ||
        __get_user(txc.ppsfreq, &utp->ppsfreq) ||
        __get_user(txc.jitter, &utp->jitter) ||
        __get_user(txc.shift, &utp->shift) ||
        __get_user(txc.stabil, &utp->stabil) ||
        __get_user(txc.jitcnt, &utp->jitcnt) ||
        __get_user(txc.calcnt, &utp->calcnt) ||
        __get_user(txc.errcnt, &utp->errcnt) ||
        __get_user(txc.stbcnt, &utp->stbcnt))
        return -EFAULT;

    ret = do_adjtimex(&txc);

    /* adjust the conversion of TB to time of day to track adjtimex */
    ppc_adjtimex();

    if (put_user(txc.modes, &utp->modes) ||
        __put_user(txc.offset, &utp->offset) ||
        __put_user(txc.freq, &utp->freq) ||
        __put_user(txc.maxerror, &utp->maxerror) ||
        __put_user(txc.esterror, &utp->esterror) ||
        __put_user(txc.status, &utp->status) ||
        __put_user(txc.constant, &utp->constant) ||
        __put_user(txc.precision, &utp->precision) ||
        __put_user(txc.tolerance, &utp->tolerance) ||
        __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
        __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
        __put_user(txc.tick, &utp->tick) ||
        __put_user(txc.ppsfreq, &utp->ppsfreq) ||
        __put_user(txc.jitter, &utp->jitter) ||
        __put_user(txc.shift, &utp->shift) ||
        __put_user(txc.stabil, &utp->stabil) ||
        __put_user(txc.jitcnt, &utp->jitcnt) ||
        __put_user(txc.calcnt, &utp->calcnt) ||
        __put_user(txc.errcnt, &utp->errcnt) ||
        __put_user(txc.stbcnt, &utp->stbcnt))
        ret = -EFAULT;

    return ret;
}
void do_rt_sigreturn(struct pt_regs *regs)
{
    struct rt_signal_frame __user *sf;
    unsigned long tpc, tnpc, tstate;
    __siginfo_fpu_t __user *fpu_save;
    mm_segment_t old_fs;
    sigset_t set;
    stack_t st;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    synchronize_user_stack ();
    sf = (struct rt_signal_frame __user *)
        (regs->u_regs [UREG_FP] + STACK_BIAS);

    /* 1. Make sure we are not getting garbage from the user */
    if (((unsigned long) sf) & 3)
        goto segv;

    err = get_user(tpc, &sf->regs.tpc);
    err |= __get_user(tnpc, &sf->regs.tnpc);
    if (test_thread_flag(TIF_32BIT)) {
        tpc &= 0xffffffff;
        tnpc &= 0xffffffff;
    }
    err |= ((tpc | tnpc) & 3);

    /* 2. Restore the state */
    err |= __get_user(regs->y, &sf->regs.y);
    err |= __get_user(tstate, &sf->regs.tstate);
    err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

    /* User can only change condition codes and %asi in %tstate. */
    regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
    regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

    err |= __get_user(fpu_save, &sf->fpu_save);
    if (fpu_save)
        err |= restore_fpu_state(regs, &sf->fpu_state);

    err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
    err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
    if (err)
        goto segv;

    regs->tpc = tpc;
    regs->tnpc = tnpc;

    /* It is more difficult to avoid calling this function than to
       call it and ignore errors. */
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
    set_fs(old_fs);

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return;
segv:
    force_sig(SIGSEGV, current);
}
static int restore_sigcontext(struct pt_regs *regs,
                              struct sigcontext __user *sc)
{
    unsigned long tmp;
    int err = 0;

    err |= copy_from_user(&regs->r00, &sc->sc_regs.r0,
                          32 * sizeof(unsigned long));
    err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
    err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
    err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
    err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
    err |= __get_user(regs->m0, &sc->sc_regs.m0);
    err |= __get_user(regs->m1, &sc->sc_regs.m1);
    err |= __get_user(regs->usr, &sc->sc_regs.usr);
    err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
    err |= __get_user(regs->gp, &sc->sc_regs.gp);
    err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
    err |= __get_user(tmp, &sc->sc_regs.pc);
    pt_set_elr(regs, tmp);

    return err;
}
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
    struct rt_sigframe __user *frame;
    int err = 0;
    int signal;

    frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));

    if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
        goto give_sigsegv;

    signal = current_thread_info()->exec_domain
        && current_thread_info()->exec_domain->signal_invmap
        && sig < 32
        ? current_thread_info()->exec_domain->signal_invmap[sig]
        : sig;

    err |= copy_siginfo_to_user(&frame->info, info);

    /* Create the ucontext.  */
    err |= __put_user(0, &frame->uc.uc_flags);
    err |= __put_user(NULL, &frame->uc.uc_link);
    err |= __put_user((void *)current->sas_ss_sp,
                      &frame->uc.uc_stack.ss_sp);
    err |= __put_user(sas_ss_flags(regs->regs[15]),
                      &frame->uc.uc_stack.ss_flags);
    err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
    err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
    err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa.sa_flags & SA_RESTORER) {
        regs->pr = (unsigned long) ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
    } else if (likely(current->mm->context.vdso)) {
        regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
    } else {
        /* Generate return code (system call to rt_sigreturn) */
        err |= __put_user(MOVW(7), &frame->retcode[0]);
        err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
        err |= __put_user(OR_R0_R0, &frame->retcode[2]);
        err |= __put_user(OR_R0_R0, &frame->retcode[3]);
        err |= __put_user(OR_R0_R0, &frame->retcode[4]);
        err |= __put_user(OR_R0_R0, &frame->retcode[5]);
        err |= __put_user(OR_R0_R0, &frame->retcode[6]);
        err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
        regs->pr = (unsigned long) frame->retcode;
    }

    if (err)
        goto give_sigsegv;

    /* Set up registers for signal handler */
    regs->regs[15] = (unsigned long) frame;
    regs->regs[4] = signal;    /* Arg for signal handler */
    regs->regs[5] = (unsigned long) &frame->info;
    regs->regs[6] = (unsigned long) &frame->uc;

    if (current->personality & FDPIC_FUNCPTRS) {
        struct fdpic_func_descriptor __user *funcptr =
            (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;

        __get_user(regs->pc, &funcptr->text);
        __get_user(regs->regs[12], &funcptr->GOT);
    } else
        regs->pc = (unsigned long)ka->sa.sa_handler;

    set_fs(USER_DS);

    pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
             current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);

    flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));

    return 0;

give_sigsegv:
    force_sigsegv(sig, current);
    return -EFAULT;
}
/*
 * Commands to do_syslog:
 *
 *      0 -- Close the log.  Currently a NOP.
 *      1 -- Open the log.  Currently a NOP.
 *      2 -- Read from the log.
 *      3 -- Read all messages remaining in the ring buffer.
 *      4 -- Read and clear all messages remaining in the ring buffer
 *      5 -- Clear ring buffer.
 *      6 -- Disable printk's to console
 *      7 -- Enable printk's to console
 *      8 -- Set level of messages printed to console
 *      9 -- Return number of unread characters in the log buffer
 *     10 -- Return size of the log buffer
 */
int do_syslog(int type, char __user *buf, int len)
{
    unsigned i, j, limit, count;
    int do_clear = 0;
    char c;
    int error = 0;

    error = security_syslog(type);
    if (error)
        return error;

    switch (type) {
    case 0:        /* Close log */
        break;
    case 1:        /* Open log */
        break;
    case 2:        /* Read from log */
        error = -EINVAL;
        if (!buf || len < 0)
            goto out;
        error = 0;
        if (!len)
            goto out;
        if (!access_ok(VERIFY_WRITE, buf, len)) {
            error = -EFAULT;
            goto out;
        }
        error = wait_event_interruptible(log_wait,
                                         (log_start - log_end));
        if (error)
            goto out;
        i = 0;
        spin_lock_irq(&logbuf_lock);
        while (!error && (log_start != log_end) && i < len) {
            c = LOG_BUF(log_start);
            log_start++;
            spin_unlock_irq(&logbuf_lock);
            error = __put_user(c, buf);
            buf++;
            i++;
            cond_resched();
            spin_lock_irq(&logbuf_lock);
        }
        spin_unlock_irq(&logbuf_lock);
        if (!error)
            error = i;
        break;
    case 4:        /* Read/clear last kernel messages */
        do_clear = 1;
        /* FALL THRU */
    case 3:        /* Read last kernel messages */
        error = -EINVAL;
        if (!buf || len < 0)
            goto out;
        error = 0;
        if (!len)
            goto out;
        if (!access_ok(VERIFY_WRITE, buf, len)) {
            error = -EFAULT;
            goto out;
        }
        count = len;
        if (count > log_buf_len)
            count = log_buf_len;
        spin_lock_irq(&logbuf_lock);
        if (count > logged_chars)
            count = logged_chars;
        if (do_clear)
            logged_chars = 0;
        limit = log_end;
        /*
         * __put_user() could sleep, and while we sleep
         * printk() could overwrite the messages
         * we try to copy to user space. Therefore
         * the messages are copied in reverse. <manfreds>
         */
        for (i = 0; i < count && !error; i++) {
            j = limit-1-i;
            if (j + log_buf_len < log_end)
                break;
            c = LOG_BUF(j);
            spin_unlock_irq(&logbuf_lock);
            error = __put_user(c, &buf[count-1-i]);
            cond_resched();
            spin_lock_irq(&logbuf_lock);
        }
        spin_unlock_irq(&logbuf_lock);
        if (error)
            break;
        error = i;
        if (i != count) {
            int offset = count-error;
            /* buffer overflow during copy, correct user buffer. */
            for (i = 0; i < error; i++) {
                if (__get_user(c, &buf[i+offset]) ||
                    __put_user(c, &buf[i])) {
                    error = -EFAULT;
                    break;
                }
                cond_resched();
            }
        }
        break;
    case 5:        /* Clear ring buffer */
        logged_chars = 0;
        break;
    case 6:        /* Disable logging to console */
        console_loglevel = minimum_console_loglevel;
        break;
    case 7:        /* Enable logging to console */
        console_loglevel = default_console_loglevel;
        break;
    case 8:        /* Set level of messages printed to console */
        error = -EINVAL;
        if (len < 1 || len > 8)
            goto out;
        if (len < minimum_console_loglevel)
            len = minimum_console_loglevel;
        console_loglevel = len;
        error = 0;
        break;
    case 9:        /* Number of chars in the log buffer */
        error = log_end - log_start;
        break;
    case 10:       /* Size of the log buffer */
        error = log_buf_len;
        break;
    default:
        error = -EINVAL;
        break;
    }
out:
    return error;
}
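/*
 * Editorial sketch: the reverse copy in cases 3/4 above stops as soon as an
 * entry has been lapped by the writer while the lock was dropped.  With a
 * ring of log_buf_len characters, the character at absolute index j is still
 * present only while j + log_buf_len >= log_end, which is exactly the loop's
 * break condition stated positively:
 */
static inline int log_char_still_present(unsigned int j, unsigned int log_end,
                                         unsigned int log_buf_len)
{
    return j + log_buf_len >= log_end;   /* not yet overwritten by printk() */
}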
static long get_tv32(struct timeval *o, struct compat_timeval __user *i)
{
    return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
            (__get_user(o->tv_sec, &i->tv_sec) |
             __get_user(o->tv_usec, &i->tv_usec)));
}
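/*
 * Editorial note on get_tv32(): the two __get_user() results are combined
 * with the bitwise '|' rather than the short-circuiting '||', so both fields
 * are always fetched and any nonzero error survives into the result.  A
 * minimal user-space sketch of the same idiom, with a hypothetical
 * fetch_field() standing in for __get_user():
 */
#include <errno.h>

/* hypothetical stand-in for __get_user(): fails on a NULL pointer */
static int fetch_field(int *p)
{
    if (!p)
        return -EFAULT;
    *p = 0;
    return 0;
}

static int fetch_pair(int *a, int *b)
{
    int err = 0;

    err |= fetch_field(a);   /* runs unconditionally */
    err |= fetch_field(b);   /* still runs even if the first fetch failed */
    return err ? -EFAULT : 0;
}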
ssize_t compat_rw_copy_check_uvector(int type,
        const struct compat_iovec __user *uvector, unsigned long nr_segs,
        unsigned long fast_segs, struct iovec *fast_pointer,
        struct iovec **ret_pointer)
{
    compat_ssize_t tot_len;
    struct iovec *iov = *ret_pointer = fast_pointer;
    ssize_t ret = 0;
    int seg;

    /*
     * SuS says "The readv() function *may* fail if the iovcnt argument
     * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
     * traditionally returned zero for zero segments, so...
     */
    if (nr_segs == 0)
        goto out;

    ret = -EINVAL;
    if (nr_segs > UIO_MAXIOV)
        goto out;
    if (nr_segs > fast_segs) {
        ret = -ENOMEM;
        iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
        if (iov == NULL)
            goto out;
    }
    *ret_pointer = iov;

    ret = -EFAULT;
    if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
        goto out;

    /*
     * Single unix specification:
     * We should -EINVAL if an element length is not >= 0 and fitting an
     * ssize_t.
     *
     * In Linux, the total length is limited to MAX_RW_COUNT, there is
     * no overflow possibility.
     */
    tot_len = 0;
    ret = -EINVAL;
    for (seg = 0; seg < nr_segs; seg++) {
        compat_uptr_t buf;
        compat_ssize_t len;

        if (__get_user(len, &uvector->iov_len) ||
            __get_user(buf, &uvector->iov_base)) {
            ret = -EFAULT;
            goto out;
        }
        if (len < 0)    /* size_t not fitting in compat_ssize_t .. */
            goto out;
        if (type >= 0 &&
            !access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
            ret = -EFAULT;
            goto out;
        }
        if (len > MAX_RW_COUNT - tot_len)
            len = MAX_RW_COUNT - tot_len;
        tot_len += len;
        iov->iov_base = compat_ptr(buf);
        iov->iov_len = (compat_size_t) len;
        uvector++;
        iov++;
    }
    ret = tot_len;

out:
    return ret;
}
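/*
 * Editorial sketch: the clamp in the segment loop above
 * (len > MAX_RW_COUNT - tot_len ? MAX_RW_COUNT - tot_len : len) caps each
 * segment against the *remaining* budget, so the running total can never
 * exceed the limit and the addition can never overflow.  In isolation,
 * assuming 0 <= tot <= max and len >= 0:
 */
static long add_capped(long tot, long len, long max)
{
    if (len > max - tot)    /* only the remaining budget may be consumed */
        len = max - tot;
    return tot + len;       /* result never exceeds max */
}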
int compat_mc_getsockopt(struct sock *sock, int level, int optname,
    char __user *optval, int __user *optlen,
    int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
{
    struct compat_group_filter __user *gf32 = (void *)optval;
    struct group_filter __user *kgf;
    int __user *koptlen;
    u32 interface, fmode, numsrc;
    int klen, ulen, err;

    if (optname != MCAST_MSFILTER)
        return getsockopt(sock, level, optname, optval, optlen);

    koptlen = compat_alloc_user_space(sizeof(*koptlen));
    if (!access_ok(VERIFY_READ, optlen, sizeof(*optlen)) ||
        __get_user(ulen, optlen))
        return -EFAULT;

    /* adjust len for pad */
    klen = ulen + sizeof(*kgf) - sizeof(*gf32);

    if (klen < GROUP_FILTER_SIZE(0))
        return -EINVAL;

    if (!access_ok(VERIFY_WRITE, koptlen, sizeof(*koptlen)) ||
        __put_user(klen, koptlen))
        return -EFAULT;

    /* have to allow space for previous compat_alloc_user_space, too */
    kgf = compat_alloc_user_space(klen+sizeof(*optlen));

    if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
        __get_user(interface, &gf32->gf_interface) ||
        __get_user(fmode, &gf32->gf_fmode) ||
        __get_user(numsrc, &gf32->gf_numsrc) ||
        __put_user(interface, &kgf->gf_interface) ||
        __put_user(fmode, &kgf->gf_fmode) ||
        __put_user(numsrc, &kgf->gf_numsrc) ||
        copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
        return -EFAULT;

    err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
    if (err)
        return err;

    if (!access_ok(VERIFY_READ, koptlen, sizeof(*koptlen)) ||
        __get_user(klen, koptlen))
        return -EFAULT;

    ulen = klen - (sizeof(*kgf)-sizeof(*gf32));

    if (!access_ok(VERIFY_WRITE, optlen, sizeof(*optlen)) ||
        __put_user(ulen, optlen))
        return -EFAULT;

    if (!access_ok(VERIFY_READ, kgf, klen) ||
        !access_ok(VERIFY_WRITE, gf32, ulen) ||
        __get_user(interface, &kgf->gf_interface) ||
        __get_user(fmode, &kgf->gf_fmode) ||
        __get_user(numsrc, &kgf->gf_numsrc) ||
        __put_user(interface, &gf32->gf_interface) ||
        __put_user(fmode, &gf32->gf_fmode) ||
        __put_user(numsrc, &gf32->gf_numsrc))
        return -EFAULT;

    if (numsrc) {
        int copylen;

        klen -= GROUP_FILTER_SIZE(0);
        copylen = numsrc * sizeof(gf32->gf_slist[0]);
        if (copylen > klen)
            copylen = klen;
        if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
            return -EFAULT;
    }
    return err;
}
static int pcidev_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
{
    int ret = 0;
    struct pcidev_struct *pcidev = (struct pcidev_struct *)file->private_data;

    if (!pcidev)
        return -EIO;

    switch(cmd) {
    case PCIDEV_IOCTL_FIND: {
        struct pcidev_find_struct *find;
        struct pci_dev *dev;
        unsigned long vendorID, deviceID;
        int idx;

        if (pcidev->dev)
            return -EIO;    // only alloc once for now
        if (!access_ok(VERIFY_WRITE, (void *)arg,
                       sizeof(struct pcidev_find_struct)))
            return -EFAULT;
        find = (struct pcidev_find_struct *)arg;
        __get_user(vendorID, &find->vendorID);
        __get_user(deviceID, &find->deviceID);
        __put_user(-1, &find->bus);
        __put_user(-1, &find->device);
        __put_user(-1, &find->func);
        dev = pci_find_device(vendorID, deviceID, NULL);
        if (!dev)
            return -ENOENT;
        if (pci_enable_device(dev)) {
            printk(KERN_WARNING "pcidev: Could not enable the PCI device.\n");
            return -EIO;
        }
        if (pci_set_dma_mask(dev, 0xffffffff))
            printk(KERN_WARNING "pcidev: only limited PCI busmaster DMA support.\n");
        pci_set_master(dev);
        printk(KERN_INFO "pcidev: device found at %x:%x.%d\n",
               dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
        ret = pci_request_regions(dev, pcidev_name);
        if (ret < 0)
            break;
        for (idx = 0; idx < PCIDEV_COUNT_RESOURCES; idx++) {
            if (pci_resource_flags(dev, idx) & IORESOURCE_MEM) {
                long len = pci_resource_len(dev, idx);
                unsigned long mapped_start =
                    (unsigned long)ioremap(pci_resource_start(dev, idx), len);
                __put_user(mapped_start, &find->resources[idx].start);
                __put_user(mapped_start + len - 1, &find->resources[idx].end);
                pcidev->mapped_mem[idx] = (void *)mapped_start;
            } else {
                pcidev->mapped_mem[idx] = NULL;
                __put_user(pci_resource_start(dev, idx),
                           &find->resources[idx].start);
                __put_user(pci_resource_end(dev, idx),
                           &find->resources[idx].end);
            }
            __put_user(pci_resource_flags(dev, idx),
                       &find->resources[idx].flags);
        }
        pcidev->dev = dev;
        __put_user(dev->bus->number, &find->bus);
        __put_user(PCI_SLOT(dev->devfn), &find->device);
        __put_user(PCI_FUNC(dev->devfn), &find->func);
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_READ_CONFIG_BYTE:
    case PCIDEV_IOCTL_READ_CONFIG_WORD:
    case PCIDEV_IOCTL_READ_CONFIG_DWORD: {
        struct pcidev_io_struct *io;
        unsigned long address, value;

        if (!pcidev->dev)
            return -EIO;
        if (!access_ok(VERIFY_WRITE, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        __put_user(-1, &io->value);
        printk(KERN_DEBUG "pcidev: reading config address %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_READ_CONFIG_BYTE:
            ret = pci_read_config_byte(pcidev->dev, address, (u8 *)&value);
            break;
        case PCIDEV_IOCTL_READ_CONFIG_WORD:
            ret = pci_read_config_word(pcidev->dev, address, (u16 *)&value);
            break;
        case PCIDEV_IOCTL_READ_CONFIG_DWORD:
            ret = pci_read_config_dword(pcidev->dev, address, (u32 *)&value);
            break;
        }
        if (ret < 0)
            return ret;
        __put_user(value, &io->value);
        break;
    }
    case PCIDEV_IOCTL_WRITE_CONFIG_BYTE:
    case PCIDEV_IOCTL_WRITE_CONFIG_WORD:
    case PCIDEV_IOCTL_WRITE_CONFIG_DWORD: {
        struct pcidev_io_struct *io;
        unsigned long address, value;

        if (!pcidev->dev)
            return -EIO;
        if (!access_ok(VERIFY_READ, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        __get_user(value, &io->value);
        /*
         * Next tests prevent the pcidev user from remapping
         * the PCI host device since this could cause great
         * trouble because we don't own those I/O resources.
         * If the pcidev wants to remap a device he needs to
         * emulate the mapping himself and not bother the host
         * kernel about it.
         */
        if (address == PCI_INTERRUPT_PIN) {
            printk(KERN_WARNING "pcidev: not allowed to set irq pin!\n");
            return -EIO;
        }
        if (address == PCI_INTERRUPT_LINE) {
            printk(KERN_WARNING "pcidev: not allowed to set irq line!\n");
            return -EIO;
        }
        if (PCI_BASE_ADDRESS_0 <= address &&
            (address & ~3UL) <= PCI_BASE_ADDRESS_5) {
            printk(KERN_WARNING "pcidev: not allowed to change base address %d\n",
                   (int)((address & ~3UL) - PCI_BASE_ADDRESS_0) / 4);
            return -EIO;
        }
        printk(KERN_DEBUG "pcidev: writing config address %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_WRITE_CONFIG_BYTE:
            ret = pci_write_config_byte(pcidev->dev, address, (u8)value);
            break;
        case PCIDEV_IOCTL_WRITE_CONFIG_WORD:
            ret = pci_write_config_word(pcidev->dev, address, (u16)value);
            break;
        case PCIDEV_IOCTL_WRITE_CONFIG_DWORD:
            ret = pci_write_config_dword(pcidev->dev, address, (u32)value);
            break;
        }
        break;
    }
    case PCIDEV_IOCTL_INTERRUPT: {
        u8 irq;

        if (!pcidev->dev)
            return -EIO;
        ret = pci_read_config_byte(pcidev->dev, PCI_INTERRUPT_PIN, &irq);
        if (ret < 0)
            break;
        if (!irq)
            return -EIO;
        ret = pci_read_config_byte(pcidev->dev, PCI_INTERRUPT_LINE, &irq);
        if (ret < 0)
            break;
        if (arg & 1) {
            pcidev->pid = current->pid;    // our dev_id
            printk(KERN_INFO "pcidev: enabling IRQ %d\n", irq);
            ret = request_irq(irq, pcidev_irqhandler, SA_SHIRQ,
                              pcidev_name, (void *)current->pid);
        } else {
            if (!pcidev->pid)
                return -EIO;
            printk(KERN_INFO "pcidev: disabling IRQ %d\n", irq);
            free_irq(irq, (void *)pcidev->pid);
            pcidev->pid = 0;
            ret = 0;
        }
        break;
    }
    /*
     * Next ioctl is only for testing purposes.
     */
    case PCIDEV_IOCTL_INTERRUPT_TEST: {
        ret = -EIO;
        if (!pcidev->dev)
            break;
        if (!pcidev->pid)
            break;
        if (pcidev->irq_timer.function)
            del_timer_sync(&pcidev->irq_timer);
        pcidev->irq_timer.function = NULL;
        if (arg & 1) {
            init_timer(&pcidev->irq_timer);
            pcidev->irq_timer.function = irq_test_timer;
            pcidev->irq_timer.data = (unsigned long)pcidev;
            pcidev->irq_timer.expires = jiffies + HZ;
            add_timer(&pcidev->irq_timer);
        }
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_READ_IO_BYTE:
    case PCIDEV_IOCTL_READ_IO_WORD:
    case PCIDEV_IOCTL_READ_IO_DWORD: {
        /*
         * We should probably check access rights against
         * the PCI resource list... but who cares for a
         * security hole more or less :)
         */
        struct pcidev_io_struct *io;
        unsigned long address, value = -1;

        if (!access_ok(VERIFY_WRITE, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        printk(KERN_DEBUG "pcidev: reading I/O port %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_READ_IO_BYTE:
            value = inb(address);
            break;
        case PCIDEV_IOCTL_READ_IO_WORD:
            value = inw(address);
            break;
        case PCIDEV_IOCTL_READ_IO_DWORD:
            value = inl(address);
            break;
        }
        __put_user(value, &io->value);
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_WRITE_IO_BYTE:
    case PCIDEV_IOCTL_WRITE_IO_WORD:
    case PCIDEV_IOCTL_WRITE_IO_DWORD: {
        struct pcidev_io_struct *io;
        unsigned long address, value;

        if (!access_ok(VERIFY_READ, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        __get_user(value, &io->value);
        printk(KERN_DEBUG "pcidev: writing I/O port %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_WRITE_IO_BYTE:
            outb(value, address);
            break;
        case PCIDEV_IOCTL_WRITE_IO_WORD:
            outw(value, address);
            break;
        case PCIDEV_IOCTL_WRITE_IO_DWORD:
            outl(value, address);
            break;
        }
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_READ_MEM_BYTE:
    case PCIDEV_IOCTL_READ_MEM_WORD:
    case PCIDEV_IOCTL_READ_MEM_DWORD: {
        struct pcidev_io_struct *io;
        unsigned long address, value = -1;

        if (!access_ok(VERIFY_WRITE, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        printk(KERN_DEBUG "pcidev: reading memory %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_READ_MEM_BYTE:
            value = readb((unsigned char *)address);
            break;
        case PCIDEV_IOCTL_READ_MEM_WORD:
            value = readw((unsigned short *)address);
            break;
        case PCIDEV_IOCTL_READ_MEM_DWORD:
            value = readl((unsigned int *)address);
            break;
        }
        __put_user(value, &io->value);
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_WRITE_MEM_BYTE:
    case PCIDEV_IOCTL_WRITE_MEM_WORD:
    case PCIDEV_IOCTL_WRITE_MEM_DWORD: {
        struct pcidev_io_struct *io;
        unsigned long address, value;

        if (!access_ok(VERIFY_READ, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        __get_user(value, &io->value);
        printk(KERN_DEBUG "pcidev: writing memory %#x\n", (int)address);
        switch(cmd) {
        case PCIDEV_IOCTL_WRITE_MEM_BYTE:
            writeb(value, (unsigned char *)address);
            break;
        case PCIDEV_IOCTL_WRITE_MEM_WORD:
            writew(value, (unsigned short *)address);
            break;
        case PCIDEV_IOCTL_WRITE_MEM_DWORD:
            writel(value, (unsigned int *)address);
            break;
        }
        ret = 0;
        break;
    }
    case PCIDEV_IOCTL_PROBE_CONFIG_DWORD: {
        /*
         * This ioctl allows for probing a config space value.
         * This can be used for base address size probing
         */
        struct pcidev_io_struct *io;
        unsigned long address, value, orig_value;

        if (!pcidev->dev)
            return -EIO;
        if (!access_ok(VERIFY_WRITE, (void *)arg,
                       sizeof(struct pcidev_io_struct)))
            return -EFAULT;
        io = (struct pcidev_io_struct *)arg;
        __get_user(address, &io->address);
        __get_user(value, &io->value);
        __put_user(-1, &io->value);
        printk(KERN_INFO "pcidev: probing config space address: %#x\n",
               (int)address);
        ret = pci_read_config_dword(pcidev->dev, address, (u32 *)&orig_value);
        if (ret < 0)
            break;
        pci_write_config_dword(pcidev->dev, address, (u32)value);
        pci_read_config_dword(pcidev->dev, address, (u32 *)&value);
        ret = pci_write_config_dword(pcidev->dev, address, (u32)orig_value);
        if (ret < 0)
            break;
        __put_user(value, &io->value);
        break;
    }
    default:
        ret = -ENOTTY;
    }
    return ret;
}
static int restore_sigcontext32(struct pt_regs *regs,
                                struct sigcontext32 __user *sc)
{
    u32 used_math;
    int err = 0;
    s32 treg;
    int i;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    err |= __get_user(regs->cp0_epc, &sc->sc_pc);
    err |= __get_user(regs->hi, &sc->sc_mdhi);
    err |= __get_user(regs->lo, &sc->sc_mdlo);
    if (cpu_has_dsp) {
        err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
        err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
        err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
        err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
        err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
        err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
        err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
    }

    for (i = 1; i < 32; i++)
        err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

    err |= __get_user(used_math, &sc->sc_used_math);
    conditional_used_math(used_math);

    if (used_math) {
        /* restore fpu context if we have used it before */
        if (!err)
            err = check_and_restore_fp_context32(sc);
    } else {
        /* signal handler may have used FPU.  Give it up. */
        lose_fpu(0);
    }

    return err;
}
long restore_sigcontext32(struct compat_sigcontext __user *sc,
                          struct compat_regfile __user *rf,
                          struct pt_regs *regs)
{
    long err = 0;
    compat_uint_t compat_reg;
    compat_uint_t compat_regt;
    int regn;

    /* When loading 32-bit values into 64-bit registers make
       sure to clear the upper 32-bits */
    DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
    DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n",
        sc, rf, regs);
    DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n",
        sizeof(*sc));
    for (regn = 0; regn < 32; regn++) {
        err |= __get_user(compat_reg, &sc->sc_gr[regn]);
        regs->gr[regn] = compat_reg;
        /* Load upper half */
        err |= __get_user(compat_regt, &rf->rf_gr[regn]);
        regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
        DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
            regn, regs->gr[regn], compat_regt, compat_reg);
    }
    DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",
        sc->sc_fr, sizeof(sc->sc_fr));
    /* XXX: BE WARNED FR's are 64-BIT! */
    err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));

    /* Better safe than sorry, pass __get_user two things of
       the same size and let gcc do the upward conversion to
       64-bits */
    err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
    /* Load upper half */
    err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
    regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
    DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
    DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
        &sc->sc_iaoq[0], compat_reg);

    err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
    /* Load upper half */
    err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
    regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
    DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
    DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
        &sc->sc_iaoq[1], compat_reg);
    DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
        regs->iaoq[0], regs->iaoq[1]);

    err |= __get_user(compat_reg, &sc->sc_iasq[0]);
    /* Load the upper half for iasq */
    err |= __get_user(compat_regt, &rf->rf_iasq[0]);
    regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
    DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);

    err |= __get_user(compat_reg, &sc->sc_iasq[1]);
    /* Load the upper half for iasq */
    err |= __get_user(compat_regt, &rf->rf_iasq[1]);
    regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
    DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
    DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
        regs->iasq[0], regs->iasq[1]);

    err |= __get_user(compat_reg, &sc->sc_sar);
    /* Load the upper half for sar */
    err |= __get_user(compat_regt, &rf->rf_sar);
    regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
    DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
    DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
    DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);

    return err;
}
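/*
 * Editorial sketch: restore_sigcontext32() above rebuilds each 64-bit
 * register from the low half stored in the compat sigcontext and the high
 * half stored in the regfile, repeating ((u64)hi << 32) | lo for gr[],
 * iaoq[], iasq[] and sar. In isolation:
 */
static inline unsigned long long join_halves(unsigned int high,
                                             unsigned int low)
{
    return ((unsigned long long)high << 32) | low;
}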
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
    struct ucontext __user *ucp = (struct ucontext __user *)
        regs->u_regs[UREG_I0];
    mc_gregset_t __user *grp;
    mcontext_t __user *mcp;
    unsigned long fp, i7;
    unsigned char fenab;
    int err;

    synchronize_user_stack();
    if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
        goto do_sigsegv;

#if 1
    fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
    fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

    mcp = &ucp->uc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc = (regs->tnpc & 0xffffffff);
        regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
    } else {
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
    }
    err = 0;
    if (_NSIG_WORDS == 1)
        err |= __put_user(current->blocked.sig[0],
                          (unsigned long __user *)&ucp->uc_sigmask);
    else
        err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
                              sizeof(sigset_t));

    err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
    err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
    err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
    err |= __put_user(regs->y, &((*grp)[MC_Y]));
    err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
    err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
    err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
    err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
    err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
    err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
    err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
    err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
    err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
    err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
    err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
    err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
    err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
    err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
    err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

    err |= __get_user(fp,
                      &(((struct reg_window __user *)
                         (STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6]));
    err |= __get_user(i7,
                      &(((struct reg_window __user *)
                         (STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7]));
    err |= __put_user(fp, &(mcp->mc_fp));
    err |= __put_user(i7, &(mcp->mc_i7));

    err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
    if (fenab) {
        unsigned long *fpregs = current_thread_info()->fpregs;
        unsigned long fprs;

        fprs = current_thread_info()->fpsaved[0];
        if (fprs & FPRS_DL)
            err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
                                (sizeof(unsigned int) * 32));
        if (fprs & FPRS_DU)
            err |= copy_to_user(
                ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16,
                fpregs+16, (sizeof(unsigned int) * 32));
        err |= __put_user(current_thread_info()->xfsr[0],
                          &(mcp->mc_fpregs.mcfpu_fsr));
        err |= __put_user(current_thread_info()->gsr[0],
                          &(mcp->mc_fpregs.mcfpu_gsr));
        err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
    }
    if (err)
        goto do_sigsegv;

    return;
do_sigsegv:
    force_sig(SIGSEGV, current);
}
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
    struct rt_signal_frame __user *sf;
    unsigned int psr, pc, npc;
    __siginfo_fpu_t __user *fpu_save;
    __siginfo_rwin_t __user *rwin_save;
    mm_segment_t old_fs;
    sigset_t set;
    stack_t st;
    int err;

    synchronize_user_stack();
    sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];

    if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
        (((unsigned long) sf) & 0x03))
        goto segv;

    err = __get_user(pc, &sf->regs.pc);
    err |= __get_user(npc, &sf->regs.npc);
    err |= ((pc | npc) & 0x03);

    err |= __get_user(regs->y, &sf->regs.y);
    err |= __get_user(psr, &sf->regs.psr);

    err |= __copy_from_user(&regs->u_regs[UREG_G1],
                            &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

    regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

    /* Prevent syscall restart. */
    pt_regs_clear_syscall(regs);

    err |= __get_user(fpu_save, &sf->fpu_save);
    if (!err && fpu_save)
        err |= restore_fpu_state(regs, fpu_save);
    err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
    err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

    if (err)
        goto segv;

    regs->pc = pc;
    regs->npc = npc;

    /* It is more difficult to avoid calling this function than to
     * call it and ignore errors.
     */
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
    set_fs(old_fs);

    err |= __get_user(rwin_save, &sf->rwin_save);
    if (!err && rwin_save) {
        if (restore_rwin_state(rwin_save))
            goto segv;
    }

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return;
segv:
    force_sig(SIGSEGV, current);
}
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
    struct ucontext __user *ucp = (struct ucontext __user *)
        regs->u_regs[UREG_I0];
    mc_gregset_t __user *grp;
    unsigned long pc, npc, tstate;
    unsigned long fp, i7;
    unsigned char fenab;
    int err;

    flush_user_windows();
    if (get_thread_wsaved() ||
        (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
        (!__access_ok(ucp, sizeof(*ucp))))
        goto do_sigsegv;

    grp = &ucp->uc_mcontext.mc_gregs;
    err = __get_user(pc, &((*grp)[MC_PC]));
    err |= __get_user(npc, &((*grp)[MC_NPC]));
    if (err || ((pc | npc) & 3))
        goto do_sigsegv;
    if (regs->u_regs[UREG_I1]) {
        sigset_t set;

        if (_NSIG_WORDS == 1) {
            if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
                goto do_sigsegv;
        } else {
            if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
                goto do_sigsegv;
        }
        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
    }
    if (test_thread_flag(TIF_32BIT)) {
        pc &= 0xffffffff;
        npc &= 0xffffffff;
    }
    regs->tpc = pc;
    regs->tnpc = npc;
    err |= __get_user(regs->y, &((*grp)[MC_Y]));
    err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
    regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
    regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
    err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
    err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
    err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
    err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
    err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
    err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
    /* Skip %g7 as that's the thread register in userspace. */
    err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
    err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
    err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
    err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
    err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
    err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
    err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
    err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

    err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
    err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
    err |= __put_user(fp,
                      (&(((struct reg_window __user *)
                          (STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
    err |= __put_user(i7,
                      (&(((struct reg_window __user *)
                          (STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

    err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
    if (fenab) {
        unsigned long *fpregs = current_thread_info()->fpregs;
        unsigned long fprs;

        fprs_write(0);
        err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
        if (fprs & FPRS_DL)
            err |= copy_from_user(fpregs,
                                  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
                                  (sizeof(unsigned int) * 32));
        if (fprs & FPRS_DU)
            err |= copy_from_user(fpregs+16,
                                  ((unsigned long __user *)
                                   &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
                                  (sizeof(unsigned int) * 32));
        err |= __get_user(current_thread_info()->xfsr[0],
                          &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
        err |= __get_user(current_thread_info()->gsr[0],
                          &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
        regs->tstate &= ~TSTATE_PEF;
    }
    if (err)
        goto do_sigsegv;

    return;
do_sigsegv:
    force_sig(SIGSEGV, current);
}
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
    struct signal_frame __user *sf;
    unsigned long up_psr, pc, npc;
    sigset_t set;
    __siginfo_fpu_t __user *fpu_save;
    __siginfo_rwin_t __user *rwin_save;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    synchronize_user_stack();

    sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

    /* 1. Make sure we are not getting garbage from the user */
    if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
        goto segv_and_exit;

    if (((unsigned long) sf) & 3)
        goto segv_and_exit;

    err = __get_user(pc,  &sf->info.si_regs.pc);
    err |= __get_user(npc, &sf->info.si_regs.npc);

    if ((pc | npc) & 3)
        goto segv_and_exit;

    /* 2. Restore the state */
    up_psr = regs->psr;
    err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

    /* User can only change condition codes and FPU enabling in %psr. */
    regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
              | (regs->psr & (PSR_ICC | PSR_EF));

    /* Prevent syscall restart. */
    pt_regs_clear_syscall(regs);

    err |= __get_user(fpu_save, &sf->fpu_save);
    if (fpu_save)
        err |= restore_fpu_state(regs, fpu_save);
    err |= __get_user(rwin_save, &sf->rwin_save);
    if (rwin_save)
        err |= restore_rwin_state(rwin_save);

    /* This is pretty much atomic, no amount of locking would prevent
     * the races which exist anyways.
     */
    err |= __get_user(set.sig[0], &sf->info.si_mask);
    err |= __copy_from_user(&set.sig[1], &sf->extramask,
                            (_NSIG_WORDS-1) * sizeof(unsigned int));

    if (err)
        goto segv_and_exit;

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return;

segv_and_exit:
    force_sig(SIGSEGV, current);
}
/* set the current selection. Invoked by ioctl() or by kernel code. */
int set_selection(const unsigned long arg, struct tty_struct *tty, int user)
{
    int sel_mode, new_sel_start, new_sel_end, spc;
    char *bp, *obp;
    int i, ps, pe;
    unsigned int currcons = fg_console;

#ifdef CONFIG_CONSOLE_PM
    unblank_screen();
    poke_blanked_console();
#endif

    {
        unsigned short *args, xs, ys, xe, ye;

        args = (unsigned short *)(arg + 1);
        if (user) {
            if (verify_area(VERIFY_READ, args, sizeof(short) * 5))
                return -EFAULT;
            __get_user(xs, args++);
            __get_user(ys, args++);
            __get_user(xe, args++);
            __get_user(ye, args++);
            __get_user(sel_mode, args);
        } else {
            xs = *(args++); /* set selection from kernel */
            ys = *(args++);
            xe = *(args++);
            ye = *(args++);
            sel_mode = *args;
        }
        xs--; ys--; xe--; ye--;
        xs = limit(xs, video_num_columns - 1);
        ys = limit(ys, video_num_lines - 1);
        xe = limit(xe, video_num_columns - 1);
        ye = limit(ye, video_num_lines - 1);
        ps = ys * video_size_row + (xs << 1);
        pe = ye * video_size_row + (xe << 1);

        if (sel_mode == 4) {
            /* useful for screendump without selection highlights */
            clear_selection();
            return 0;
        }

        if (mouse_reporting() && (sel_mode & 16)) {
            mouse_report(tty, sel_mode & 15, xs, ys);
            return 0;
        }
    }

    if (ps > pe) {  /* make sel_start <= sel_end */
        int tmp = ps;
        ps = pe;
        pe = tmp;
    }

    if (sel_cons != fg_console) {
        clear_selection();
        sel_cons = fg_console;
    }

    switch (sel_mode) {
    case 0:  /* character-by-character selection */
        new_sel_start = ps;
        new_sel_end = pe;
        break;
    case 1:  /* word-by-word selection */
        spc = isspace(sel_pos(ps));
        for (new_sel_start = ps; ; ps -= 2) {
            if ((spc && !isspace(sel_pos(ps))) ||
                (!spc && !inword(sel_pos(ps))))
                break;
            new_sel_start = ps;
            if (!(ps % video_size_row))
                break;
        }
        spc = isspace(sel_pos(pe));
        for (new_sel_end = pe; ; pe += 2) {
            if ((spc && !isspace(sel_pos(pe))) ||
                (!spc && !inword(sel_pos(pe))))
                break;
            new_sel_end = pe;
            if (!((pe + 2) % video_size_row))
                break;
        }
        break;
    case 2:  /* line-by-line selection */
        new_sel_start = ps - ps % video_size_row;
        new_sel_end = pe + video_size_row - pe % video_size_row - 2;
        break;
    case 3:
        highlight_pointer(pe);
        return 0;
    default:
        return -EINVAL;
    }

    /* remove the pointer */
    highlight_pointer(-1);

    /* select to end of line if on trailing space */
    if (new_sel_end > new_sel_start &&
        !atedge(new_sel_end, video_size_row) &&
        isspace(sel_pos(new_sel_end))) {
        for (pe = new_sel_end + 2; ; pe += 2)
            if (!isspace(sel_pos(pe)) || atedge(pe, video_size_row))
                break;
        if (isspace(sel_pos(pe)))
            new_sel_end = pe;
    }

    if (sel_start == -1)  /* no current selection */
        highlight(new_sel_start, new_sel_end);
    else if (new_sel_start == sel_start) {
        if (new_sel_end == sel_end)  /* no action required */
            return 0;
        else if (new_sel_end > sel_end)  /* extend to right */
            highlight(sel_end + 2, new_sel_end);
        else  /* contract from right */
            highlight(new_sel_end + 2, sel_end);
    } else if (new_sel_end == sel_end) {
        if (new_sel_start < sel_start)  /* extend to left */
            highlight(new_sel_start, sel_start - 2);
        else  /* contract from left */
            highlight(sel_start, new_sel_start - 2);
    } else {  /* some other case; start selection from scratch */
        clear_selection();
        highlight(new_sel_start, new_sel_end);
    }

    sel_start = new_sel_start;
    sel_end = new_sel_end;

    /* Allocate a new buffer before freeing the old one ... */
    bp = kmalloc((sel_end - sel_start) / 2 + 1, GFP_KERNEL);
    if (!bp) {
        printk(KERN_WARNING "selection: kmalloc() failed\n");
        clear_selection();
        return -ENOMEM;
    }
    if (sel_buffer)
        kfree(sel_buffer);
    sel_buffer = bp;

    obp = bp;
    for (i = sel_start; i <= sel_end; i += 2) {
        *bp = sel_pos(i);
        if (!isspace(*bp++))
            obp = bp;
        if (!((i + 2) % video_size_row)) {
            /* strip trailing blanks from line and add newline,
               unless non-space at end of line. */
            if (obp != bp) {
                bp = obp;
                *bp++ = '\r';
            }
            obp = bp;
        }
    }
    sel_buffer_lth = bp - sel_buffer;
    return 0;
}
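/*
 * Userspace sketch of the case-1 (word-by-word) expansion above: scan
 * left, then right, from the clicked position until the character
 * class changes. A flat char buffer stands in for the console cell
 * array (step 1 instead of 2), and "word character" is simplified to
 * !isspace(); the kernel additionally consults inword().
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void expand_word(const char *buf, int len, int pos,
                        int *start, int *end)
{
    int spc = !!isspace((unsigned char)buf[pos]);
    int p;

    for (p = pos; p >= 0; p--) {
        if (!!isspace((unsigned char)buf[p]) != spc)
            break;
        *start = p;
    }
    for (p = pos; p < len; p++) {
        if (!!isspace((unsigned char)buf[p]) != spc)
            break;
        *end = p;
    }
}

int main(void)
{
    const char *line = "pick the middle word";
    int s = 0, e = 0;

    expand_word(line, (int)strlen(line), 11, &s, &e);  /* inside "middle" */
    printf("%.*s\n", e - s + 1, line + s);             /* prints "middle" */
    return 0;
}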
int get_compat_timespec(struct timespec *ts,
                        const struct compat_timespec __user *cts)
{
    return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
            __get_user(ts->tv_sec, &cts->tv_sec) ||
            __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
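/*
 * A minimal userspace analog of the pattern above: one range check up
 * front, then unchecked field reads. range_ok() and the window
 * parameters are hypothetical stand-ins for access_ok() and the user
 * address space; this is a sketch, not the kernel implementation.
 */
#include <stddef.h>
#include <stdint.h>

struct compat_ts { int32_t tv_sec; int32_t tv_nsec; };
struct native_ts { int64_t tv_sec; int64_t tv_nsec; };

static int range_ok(const void *ptr, size_t len,
                    const void *base, size_t size)
{
    const char *p = ptr, *b = base;

    /* the whole source struct must lie inside the valid window */
    return p >= b && (size_t)(p - b) <= size && len <= size - (size_t)(p - b);
}

static int get_native_ts(struct native_ts *ts, const struct compat_ts *cts,
                         const void *win, size_t win_len)
{
    if (!range_ok(cts, sizeof(*cts), win, win_len))
        return -1;              /* analogous to returning -EFAULT */
    ts->tv_sec = cts->tv_sec;   /* unchecked reads are now in bounds */
    ts->tv_nsec = cts->tv_nsec;
    return 0;
}

int main(void)
{
    struct compat_ts src = { 1700000000, 500 };
    struct native_ts dst;

    return get_native_ts(&dst, &src, &src, sizeof(src));  /* 0 on success */
}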
static long restore_tm_sigcontexts(struct pt_regs *regs,
                                   struct sigcontext __user *sc,
                                   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
    elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
    unsigned long err = 0;
    unsigned long msr;
#ifdef CONFIG_VSX
    int i;
#endif

    /* copy the GPRs */
    err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
    err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
                            sizeof(regs->gpr));

    /*
     * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
     * TEXASR was set by the signal delivery reclaim, as was TFIAR.
     * Users doing anything abhorrent like thread-switching w/ signals for
     * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
     * For the case of getting a signal and simply returning from it,
     * we don't need to re-copy them here.
     */
    err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
    err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

    /* get MSR separately, transfer the LE bit if doing signal return */
    err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
    /* pull in MSR TM from user context */
    regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
    /* pull in MSR LE from user context */
    regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

    /* The following non-GPR non-FPR non-VR state is also checkpointed: */
    err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
    err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
    err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
    err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
    err |= __get_user(current->thread.ckpt_regs.ctr, &sc->gp_regs[PT_CTR]);
    err |= __get_user(current->thread.ckpt_regs.link, &sc->gp_regs[PT_LNK]);
    err |= __get_user(current->thread.ckpt_regs.xer, &sc->gp_regs[PT_XER]);
    err |= __get_user(current->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]);

    /* These regs are not checkpointed; they can go in 'regs'. */
    err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
    err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
    err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
    err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

    /*
     * Do this before updating the thread state in
     * current->thread.fpr/vr. That way, if we get preempted
     * and another task grabs the FPU/Altivec, it won't be
     * tempted to save the current CPU state into the thread_struct
     * and corrupt what we are writing there.
     */
    discard_lazy_cpu_state();

    /*
     * Force reload of FP/VEC.
     * This has to be done before copying stuff into current->thread.fpr/vr
     * for the reasons explained in the previous comment.
     */
    regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
    err |= __get_user(v_regs, &sc->v_regs);
    err |= __get_user(tm_v_regs, &tm_sc->v_regs);
    if (err)
        return err;
    if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
        return -EFAULT;
    if (tm_v_regs && !access_ok(VERIFY_READ, tm_v_regs,
                                34 * sizeof(vector128)))
        return -EFAULT;
    /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
    if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) {
        err |= __copy_from_user(current->thread.vr, v_regs,
                                33 * sizeof(vector128));
        err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
                                33 * sizeof(vector128));
    } else if (current->thread.used_vr) {
        memset(current->thread.vr, 0, 33 * sizeof(vector128));
        memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
    }
    /* Always get VRSAVE back */
    if (v_regs != 0 && tm_v_regs != 0) {
        err |= __get_user(current->thread.vrsave,
                          (u32 __user *)&v_regs[33]);
        err |= __get_user(current->thread.transact_vrsave,
                          (u32 __user *)&tm_v_regs[33]);
    } else {
        current->thread.vrsave = 0;
        current->thread.transact_vrsave = 0;
    }
#endif /* CONFIG_ALTIVEC */

    /* restore floating point */
    err |= copy_fpr_from_user(current, &sc->fp_regs);
    err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);

#ifdef CONFIG_VSX
    /*
     * Get additional VSX data. Update v_regs to point after the
     * VMX data. Copy VSX low doubleword from userspace to local
     * buffer for formatting, then into the task struct.
     */
    if (v_regs && ((msr & MSR_VSX) != 0)) {
        v_regs += ELF_NVRREG;
        tm_v_regs += ELF_NVRREG;
        err |= copy_vsx_from_user(current, v_regs);
        err |= copy_transact_vsx_from_user(current, tm_v_regs);
    } else {
        for (i = 0; i < 32; i++) {
            current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
            current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
        }
    }
#endif

    tm_enable();
    /* Make sure the transaction is marked as failed */
    current->thread.tm_texasr |= TEXASR_FS;
    /* This loads the checkpointed FP/VEC state, if used */
    tm_recheckpoint(&current->thread, msr);

    /* This loads the speculative FP/VEC state, if used */
    if (msr & MSR_FP) {
        do_load_up_transact_fpu(&current->thread);
        regs->msr |= (MSR_FP | current->thread.fpexc_mode);
    }
#ifdef CONFIG_ALTIVEC
    if (msr & MSR_VEC) {
        do_load_up_transact_altivec(&current->thread);
        regs->msr |= MSR_VEC;
    }
#endif

    return err;
}
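/*
 * Sketch of the user-stack VMX save area layout the code above
 * assumes (an illustration with a hypothetical struct, not the
 * kernel's definitions): 32 vector registers, then VSCR, then VRSAVE
 * occupying a 34th slot -- hence the 34 * sizeof(vector128) span
 * passed to access_ok() and the (u32 *)&v_regs[33] read of VRSAVE.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { uint32_t u[4]; } vector128;

struct vmx_area {
    vector128 vr[32];      /* vr0..vr31 */
    vector128 vscr;        /* vector status and control register */
    vector128 vrsave_slot; /* only the first 32-bit word is used */
};

int main(void)
{
    assert(sizeof(struct vmx_area) == 34 * sizeof(vector128));
    assert(offsetof(struct vmx_area, vrsave_slot) == 33 * sizeof(vector128));
    return 0;
}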
/* There is a lot of hair here because the alignment rules (and
 * thus placement) of cmsg headers and length are different for
 * 32-bit apps.  -DaveM
 */
int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                                     unsigned char *stackbuf, int stackbuf_size)
{
    struct compat_cmsghdr __user *ucmsg;
    struct cmsghdr *kcmsg, *kcmsg_base;
    compat_size_t ucmlen;
    __kernel_size_t kcmlen, tmp;
    int err = -EFAULT;

    kcmlen = 0;
    kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
    ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
    while (ucmsg != NULL) {
        if (get_user(ucmlen, &ucmsg->cmsg_len))
            return -EFAULT;

        /* Catch bogons. */
        if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
            return -EINVAL;

        tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
               CMSG_ALIGN(sizeof(struct cmsghdr)));
        tmp = CMSG_ALIGN(tmp);
        kcmlen += tmp;
        ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
    }
    if (kcmlen == 0)
        return -EINVAL;

    /* The kcmlen holds the 64-bit version of the control length.
     * It may not be modified as we do not stick it into the kmsg
     * until we have successfully copied over all of the data
     * from the user.
     */
    if (kcmlen > stackbuf_size)
        kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
    if (kcmsg == NULL)
        return -ENOBUFS;

    /* Now copy them over neatly. */
    memset(kcmsg, 0, kcmlen);
    ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
    while (ucmsg != NULL) {
        if (__get_user(ucmlen, &ucmsg->cmsg_len))
            goto Efault;
        if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
            goto Einval;
        tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
               CMSG_ALIGN(sizeof(struct cmsghdr)));
        if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp))
            goto Einval;
        kcmsg->cmsg_len = tmp;
        tmp = CMSG_ALIGN(tmp);
        if (__get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level) ||
            __get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type) ||
            copy_from_user(CMSG_DATA(kcmsg),
                           CMSG_COMPAT_DATA(ucmsg),
                           (ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg)))))
            goto Efault;

        /* Advance. */
        kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
        ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
    }

    /* Ok, looks like we made it.  Hook it up and return success. */
    kmsg->msg_control = kcmsg_base;
    kmsg->msg_controllen = kcmlen;
    return 0;

Einval:
    err = -EINVAL;
Efault:
    if (kcmsg_base != (struct cmsghdr *)stackbuf)
        sock_kfree_s(sk, kcmsg_base, kcmlen);
    return err;
}
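/*
 * Worked example of the 32->64-bit cmsg length conversion above,
 * under the usual assumption that a compat cmsghdr is 12 bytes with
 * 4-byte alignment and a native one 16 bytes with 8-byte alignment.
 * ALIGN_TO, COMPAT_HDR and NATIVE_HDR are hypothetical stand-ins for
 * the kernel macros, chosen only to make the arithmetic concrete.
 */
#include <stddef.h>
#include <stdio.h>

#define ALIGN_TO(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define COMPAT_HDR 12  /* sizeof(struct compat_cmsghdr), 4-byte aligned */
#define NATIVE_HDR 16  /* sizeof(struct cmsghdr), 8-byte aligned */

int main(void)
{
    size_t ucmlen = COMPAT_HDR + 4;  /* compat header + 4-byte payload */
    /* strip the compat header, prepend a native one, re-align */
    size_t payload = ucmlen - ALIGN_TO(COMPAT_HDR, 4);
    size_t kcmlen = ALIGN_TO(payload + ALIGN_TO(NATIVE_HDR, 8), 8);

    printf("compat len %zu -> kernel len %zu\n", ucmlen, kcmlen);  /* 16 -> 24 */
    return 0;
}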
asmlinkage void irix_sigreturn(struct pt_regs *regs)
{
    struct sigctx_irix5 *context, *magic;
    unsigned long umask, mask;
    u64 *fregs;
    int sig, i, base = 0;
    sigset_t blocked;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    if (regs->regs[2] == 1000)
        base = 1;

    context = (struct sigctx_irix5 *) regs->regs[base + 4];
    magic = (struct sigctx_irix5 *) regs->regs[base + 5];
    sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
    printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
           current->comm, current->pid, context, magic, sig);
#endif
    if (!context)
        context = magic;
    if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
        goto badframe;

#ifdef DEBUG_SIG
    dump_irix5_sigctx(context);
#endif

    __get_user(regs->cp0_epc, &context->pc);
    umask = context->rmask;
    mask = 2;
    for (i = 1; i < 32; i++, mask <<= 1) {
        if (umask & mask)
            __get_user(regs->regs[i], &context->regs[i]);
    }
    __get_user(regs->hi, &context->hi);
    __get_user(regs->lo, &context->lo);

    if ((umask & 1) && context->usedfp) {
        fregs = (u64 *) &current->thread.fpu;
        for (i = 0; i < 32; i++)
            fregs[i] = (u64) context->fpregs[i];
        __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
    }

    /* XXX do sigstack crapola here... XXX */

    if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked)))
        goto badframe;

    sigdelsetmask(&blocked, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->blocked = blocked;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    /*
     * Don't let your children do this ...
     */
    if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
        do_syscall_trace(regs, 1);
    __asm__ __volatile__(
        "move\t$29,%0\n\t"
        "j\tsyscall_exit"
        : /* no outputs */
        : "r" (&regs));
    /* Unreached */

badframe:
    force_sig(SIGSEGV, current);
}
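/*
 * Minimal userspace sketch of the rmask-driven restore loop above:
 * bit i of the mask selects whether register slot i is copied back,
 * starting at bit 1 because slot 0 is handled specially. Plain array
 * reads stand in for __get_user(); this is an illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static void restore_masked(uint64_t *dst, const uint64_t *src,
                           unsigned long umask, int n)
{
    unsigned long mask = 2;  /* bit 1 corresponds to slot 1 */
    int i;

    for (i = 1; i < n; i++, mask <<= 1)
        if (umask & mask)
            dst[i] = src[i];
}

int main(void)
{
    uint64_t regs[4] = { 0, 0, 0, 0 }, ctx[4] = { 9, 8, 7, 6 };

    restore_masked(regs, ctx, 1UL << 2, 4);  /* only slot 2 selected */
    printf("%llu %llu %llu\n",
           (unsigned long long)regs[1],
           (unsigned long long)regs[2],
           (unsigned long long)regs[3]);     /* prints "0 7 0" */
    return 0;
}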
int compat_mc_setsockopt(struct sock *sock, int level, int optname,
                         char __user *optval, unsigned int optlen,
                         int (*setsockopt)(struct sock *, int, int,
                                           char __user *, unsigned int))
{
    char __user *koptval = optval;
    int koptlen = optlen;

    switch (optname) {
    case MCAST_JOIN_GROUP:
    case MCAST_LEAVE_GROUP:
    {
        struct compat_group_req __user *gr32 = (void *)optval;
        struct group_req __user *kgr =
            compat_alloc_user_space(sizeof(struct group_req));
        u32 interface;

        if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) ||
            !access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) ||
            __get_user(interface, &gr32->gr_interface) ||
            __put_user(interface, &kgr->gr_interface) ||
            copy_in_user(&kgr->gr_group, &gr32->gr_group,
                         sizeof(kgr->gr_group)))
            return -EFAULT;
        koptval = (char __user *)kgr;
        koptlen = sizeof(struct group_req);
        break;
    }
    case MCAST_JOIN_SOURCE_GROUP:
    case MCAST_LEAVE_SOURCE_GROUP:
    case MCAST_BLOCK_SOURCE:
    case MCAST_UNBLOCK_SOURCE:
    {
        struct compat_group_source_req __user *gsr32 = (void *)optval;
        struct group_source_req __user *kgsr =
            compat_alloc_user_space(sizeof(struct group_source_req));
        u32 interface;

        if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) ||
            !access_ok(VERIFY_WRITE, kgsr,
                       sizeof(struct group_source_req)) ||
            __get_user(interface, &gsr32->gsr_interface) ||
            __put_user(interface, &kgsr->gsr_interface) ||
            copy_in_user(&kgsr->gsr_group, &gsr32->gsr_group,
                         sizeof(kgsr->gsr_group)) ||
            copy_in_user(&kgsr->gsr_source, &gsr32->gsr_source,
                         sizeof(kgsr->gsr_source)))
            return -EFAULT;
        koptval = (char __user *)kgsr;
        koptlen = sizeof(struct group_source_req);
        break;
    }
    case MCAST_MSFILTER:
    {
        struct compat_group_filter __user *gf32 = (void *)optval;
        struct group_filter __user *kgf;
        u32 interface, fmode, numsrc;

        if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
            __get_user(interface, &gf32->gf_interface) ||
            __get_user(fmode, &gf32->gf_fmode) ||
            __get_user(numsrc, &gf32->gf_numsrc))
            return -EFAULT;
        koptlen = optlen + sizeof(struct group_filter) -
                  sizeof(struct compat_group_filter);
        if (koptlen < GROUP_FILTER_SIZE(numsrc))
            return -EINVAL;
        kgf = compat_alloc_user_space(koptlen);
        if (!access_ok(VERIFY_WRITE, kgf, koptlen) ||
            __put_user(interface, &kgf->gf_interface) ||
            __put_user(fmode, &kgf->gf_fmode) ||
            __put_user(numsrc, &kgf->gf_numsrc) ||
            copy_in_user(&kgf->gf_group, &gf32->gf_group,
                         sizeof(kgf->gf_group)) ||
            (numsrc &&
             copy_in_user(kgf->gf_slist, gf32->gf_slist,
                          numsrc * sizeof(kgf->gf_slist[0]))))
            return -EFAULT;
        koptval = (char __user *)kgf;
        break;
    }
    default:
        break;
    }
    return setsockopt(sock, level, optname, koptval, koptlen);
}
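/*
 * Shape of the translation above in userspace terms: read each field
 * from the 32-bit layout, write it into a freshly allocated native
 * layout, then hand the native pointer to the normal handler. The
 * structs here are hypothetical (the real difference is the alignment
 * of the embedded sockaddr_storage across ABIs), and malloc()/memcpy()
 * stand in for compat_alloc_user_space()/copy_in_user().
 */
#include <stdlib.h>
#include <string.h>

struct compat_req {
    unsigned int ifindex;
    char group[128];
} __attribute__((packed));

struct native_req {
    unsigned int ifindex;
    char group[128];
};

static struct native_req *translate(const struct compat_req *c32)
{
    struct native_req *k = malloc(sizeof(*k));

    if (!k)
        return NULL;
    k->ifindex = c32->ifindex;                       /* scalar: get+put */
    memcpy(k->group, c32->group, sizeof(k->group));  /* blob: bulk copy */
    return k;
}

int main(void)
{
    struct compat_req c = { 7, { 0 } };
    struct native_req *k = translate(&c);
    int ok = k && k->ifindex == 7;

    free(k);
    return ok ? 0 : 1;
}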
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
    unsigned i, j, limit, count;
    int do_clear = 0;
    char c;
    int error;

    error = check_syslog_permissions(type, from_file);
    if (error)
        goto out;

    error = security_syslog(type);
    if (error)
        return error;

    switch (type) {
    case SYSLOG_ACTION_CLOSE:  /* Close log */
        break;
    case SYSLOG_ACTION_OPEN:   /* Open log */
        break;
    case SYSLOG_ACTION_READ:   /* Read from log */
        error = -EINVAL;
        if (!buf || len < 0)
            goto out;
        error = 0;
        if (!len)
            goto out;
        if (!access_ok(VERIFY_WRITE, buf, len)) {
            error = -EFAULT;
            goto out;
        }
        error = wait_event_interruptible(log_wait,
                                         (log_start - log_end));
        if (error)
            goto out;
        i = 0;
        spin_lock_irq(&logbuf_lock);
        while (!error && (log_start != log_end) && i < len) {
            c = LOG_BUF(log_start);
            log_start++;
            spin_unlock_irq(&logbuf_lock);
            error = __put_user(c, buf);
            buf++;
            i++;
            cond_resched();
            spin_lock_irq(&logbuf_lock);
        }
        spin_unlock_irq(&logbuf_lock);
        if (!error)
            error = i;
        break;
    /* Read/clear last kernel messages */
    case SYSLOG_ACTION_READ_CLEAR:
        do_clear = 1;
        /* FALL THRU */
    /* Read last kernel messages */
    case SYSLOG_ACTION_READ_ALL:
        error = -EINVAL;
        if (!buf || len < 0)
            goto out;
        error = 0;
        if (!len)
            goto out;
        if (!access_ok(VERIFY_WRITE, buf, len)) {
            error = -EFAULT;
            goto out;
        }
        count = len;
        if (count > log_buf_len)
            count = log_buf_len;
        spin_lock_irq(&logbuf_lock);
        if (count > logged_chars)
            count = logged_chars;
        if (do_clear)
            logged_chars = 0;
        limit = log_end;
        /*
         * __put_user() could sleep, and while we sleep
         * printk() could overwrite the messages
         * we try to copy to user space. Therefore
         * the messages are copied in reverse. <manfreds>
         */
        for (i = 0; i < count && !error; i++) {
            j = limit - 1 - i;
            if (j + log_buf_len < log_end)
                break;
            c = LOG_BUF(j);
            spin_unlock_irq(&logbuf_lock);
            error = __put_user(c, &buf[count - 1 - i]);
            cond_resched();
            spin_lock_irq(&logbuf_lock);
        }
        spin_unlock_irq(&logbuf_lock);
        if (error)
            break;
        error = i;
        if (i != count) {
            int offset = count - error;
            /* buffer overflow during copy, correct user buffer. */
            for (i = 0; i < error; i++) {
                if (__get_user(c, &buf[i + offset]) ||
                    __put_user(c, &buf[i])) {
                    error = -EFAULT;
                    break;
                }
                cond_resched();
            }
        }
        break;
    /* Clear ring buffer */
    case SYSLOG_ACTION_CLEAR:
        logged_chars = 0;
        break;
    /* Disable logging to console */
    case SYSLOG_ACTION_CONSOLE_OFF:
        if (saved_console_loglevel == -1)
            saved_console_loglevel = console_loglevel;
        console_loglevel = minimum_console_loglevel;
        break;
    /* Enable logging to console */
    case SYSLOG_ACTION_CONSOLE_ON:
        if (saved_console_loglevel != -1) {
            console_loglevel = saved_console_loglevel;
            saved_console_loglevel = -1;
        }
        break;
    /* Set level of messages printed to console */
    case SYSLOG_ACTION_CONSOLE_LEVEL:
        error = -EINVAL;
        if (len < 1 || len > 8)
            goto out;
        if (len < minimum_console_loglevel)
            len = minimum_console_loglevel;
        console_loglevel = len;
        /* Implicitly re-enable logging to console */
        saved_console_loglevel = -1;
        error = 0;
        break;
    /* Number of chars in the log buffer */
    case SYSLOG_ACTION_SIZE_UNREAD:
        error = log_end - log_start;
        break;
    /* Size of the log buffer */
    case SYSLOG_ACTION_SIZE_BUFFER:
        error = log_buf_len;
        break;
    default:
        error = -EINVAL;
        break;
    }
out:
    return error;
}
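/*
 * Userspace sketch of the reverse copy in SYSLOG_ACTION_READ_ALL:
 * snapshot the ring's end, copy newest-to-oldest, and stop if a slot
 * has since been lapped. In the kernel, log_end can advance whenever
 * the lock is dropped around __put_user(); here 'end' is fixed, so
 * the staleness check only illustrates the index arithmetic.
 */
#include <stdio.h>

#define BUF_LEN 8  /* must be a power of two, like log_buf_len */
static char ring[BUF_LEN];
#define RING_AT(idx) (ring[(idx) & (BUF_LEN - 1)])

/* Returns how many of the last 'count' chars before 'end' were copied. */
static int copy_reverse(char *out, int count, unsigned end)
{
    unsigned limit = end;
    int i;

    for (i = 0; i < count; i++) {
        unsigned j = limit - 1 - i;

        if (j + BUF_LEN < end)  /* slot already overwritten: stop */
            break;
        out[count - 1 - i] = RING_AT(j);
    }
    return i;
}

int main(void)
{
    char out[4];
    unsigned i;
    int n;

    for (i = 0; i < 12; i++)
        RING_AT(i) = (char)('a' + i);  /* writer laps the ring once */
    n = copy_reverse(out, 4, 12);      /* newest 4 chars end at index 12 */
    printf("%d %.4s\n", n, out);       /* prints "4 ijkl" */
    return 0;
}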
void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
                             int debug_stack_lines)
{
    unsigned int i, *stack, addr, mask = STACK_SIZE;

    stack = (unsigned int *)(unsigned long)regs->_esp;
    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);

    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
    {
        printk("Guest-inaccessible memory.\n");
        return;
    }

    if ( v != current )
    {
        struct vcpu *vcpu;
        unsigned long mfn;

        ASSERT(guest_kernel_mode(v, regs));
        mfn = read_cr3() >> PAGE_SHIFT;
        for_each_vcpu( v->domain, vcpu )
            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
                break;
        if ( !vcpu )
        {
            stack = do_page_walk(v, (unsigned long)stack);
            if ( (unsigned long)stack < PAGE_SIZE )
            {
                printk("Inaccessible guest memory.\n");
                return;
            }
            mask = PAGE_SIZE;
        }
    }

    for ( i = 0; i < debug_stack_lines * 8; i++ )
    {
        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
            break;
        if ( __get_user(addr, stack) )
        {
            if ( i != 0 )
                printk("\n ");
            printk("Fault while accessing guest memory.");
            i = 1;
            break;
        }
        if ( (i != 0) && ((i % 8) == 0) )
            printk("\n ");
        printk(" %08x", addr);
        stack++;
    }

    if ( mask == PAGE_SIZE )
    {
        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
        unmap_domain_page(stack);
    }

    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");
}
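/*
 * Quick userspace check of the XOR boundary test in the loop above:
 * ((p - 1) ^ (p + sizeof(int) - 1)) & mask is nonzero exactly when the
 * pointer has advanced onto a new mask-sized region, which is where
 * the dump must stop (the next page/stack chunk may not be mapped).
 * A sketch with made-up addresses; mask plays the role of PAGE_SIZE.
 */
#include <stdio.h>

static int crosses(unsigned long p, unsigned long mask)
{
    return (((p - 1) ^ (p + sizeof(unsigned int) - 1)) & mask) != 0;
}

int main(void)
{
    unsigned long mask = 4096;  /* one 4 KiB page */

    /* last 4-byte slot of the page vs. first slot of the next page */
    printf("%d %d\n", crosses(4092, mask), crosses(4096, mask));  /* 0 1 */
    return 0;
}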