int
linux_shmat(struct thread *td, struct linux_shmat_args *args)
{
	struct shmat_args /* {
		int shmid;
		void *shmaddr;
		int shmflg;
	} */ bsd_args;
	int error;
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
	l_uintptr_t addr;
#endif

	bsd_args.shmid = args->shmid;
	bsd_args.shmaddr = PTRIN(args->shmaddr);
	bsd_args.shmflg = args->shmflg;
	if ((error = shmat(td, &bsd_args)))
		return error;
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
	addr = td->td_retval[0];
	if ((error = copyout(&addr, PTRIN(args->raddr), sizeof(addr))))
		return error;
	td->td_retval[0] = 0;
#endif
	return 0;
}
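/*
 * For reference (a sketch, not the authoritative definitions): the PTRIN()
 * and PTROUT() macros used throughout these compat routines widen a 32-bit
 * user pointer to a kernel void * and narrow it back.  The real definitions
 * live in the freebsd32/linux compat headers; they amount to:
 */
#if 0	/* illustrative only */
#define	PTRIN(v)	(void *)(uintptr_t)(v)
#define	PTROUT(v)	(u_int32_t)(uintptr_t)(v)
#endif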
int
linux_msgctl(struct thread *td, struct linux_msgctl_args *args)
{
	int error, bsd_cmd;
	struct l_msqid_ds linux_msqid;
	struct msqid_ds bsd_msqid;

	bsd_cmd = args->cmd & ~LINUX_IPC_64;
	switch (bsd_cmd) {
	case LINUX_IPC_INFO:
	case LINUX_MSG_INFO: {
		struct l_msginfo linux_msginfo;

		/*
		 * XXX MSG_INFO uses the same data structure but returns
		 * different dynamic counters in msgpool, msgmap, and
		 * msgtql fields.
		 */
		linux_msginfo.msgpool = (long)msginfo.msgmni *
		    (long)msginfo.msgmnb / 1024L;	/* XXX MSG_INFO. */
		linux_msginfo.msgmap = msginfo.msgmnb;	/* XXX MSG_INFO. */
		linux_msginfo.msgmax = msginfo.msgmax;
		linux_msginfo.msgmnb = msginfo.msgmnb;
		linux_msginfo.msgmni = msginfo.msgmni;
		linux_msginfo.msgssz = msginfo.msgssz;
		linux_msginfo.msgtql = msginfo.msgtql;	/* XXX MSG_INFO. */
		linux_msginfo.msgseg = msginfo.msgseg;
		error = copyout(&linux_msginfo, PTRIN(args->buf),
		    sizeof(linux_msginfo));
		if (error == 0)
			td->td_retval[0] = msginfo.msgmni;	/* XXX */

		return (error);
	}

	case LINUX_IPC_SET:
		error = linux_msqid_pullup(args->cmd & LINUX_IPC_64,
		    &linux_msqid, PTRIN(args->buf));
		if (error)
			return (error);
		linux_to_bsd_msqid_ds(&linux_msqid, &bsd_msqid);
		break;
	}

	error = kern_msgctl(td, args->msqid, bsd_cmd, &bsd_msqid);
	if (error != 0)
		if (bsd_cmd != LINUX_IPC_RMID || error != EINVAL)
			return (error);

	if (bsd_cmd == LINUX_IPC_STAT) {
		bsd_to_linux_msqid_ds(&bsd_msqid, &linux_msqid);
		return (linux_msqid_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_msqid, PTRIN(args->buf)));
	}

	return (0);
}
static int
linux_to_bsd_msghdr(struct msghdr *bhdr, const struct l_msghdr *lhdr)
{

	if (lhdr->msg_controllen > INT_MAX)
		return (ENOBUFS);

	bhdr->msg_name		= PTRIN(lhdr->msg_name);
	bhdr->msg_namelen	= lhdr->msg_namelen;
	bhdr->msg_iov		= PTRIN(lhdr->msg_iov);
	bhdr->msg_iovlen	= lhdr->msg_iovlen;
	bhdr->msg_control	= PTRIN(lhdr->msg_control);
	bhdr->msg_controllen	= lhdr->msg_controllen;
	bhdr->msg_flags		= linux_to_bsd_msg_flags(lhdr->msg_flags);
	return (0);
}
int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

	LINUX_CTR2(sigaltstack, "%p, %p", uap->uss, uap->uoss);

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = PTRIN(lss.ss_sp);
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = PTROUT(oss.ss_sp);
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}
int
linux32_copyiniov(struct l_iovec32 *iovp32, l_ulong iovcnt,
    struct iovec **iovp, int error)
{
	struct l_iovec32 iov32;
	struct iovec *iov;
	uint32_t iovlen;
	int i;

	*iovp = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof(struct iovec);
	iov = malloc(iovlen, M_IOV, M_WAITOK);
	for (i = 0; i < iovcnt; i++) {
		error = copyin(&iovp32[i], &iov32, sizeof(struct l_iovec32));
		if (error) {
			free(iov, M_IOV);
			return (error);
		}
		iov[i].iov_base = PTRIN(iov32.iov_base);
		iov[i].iov_len = iov32.iov_len;
	}
	*iovp = iov;
	return (0);
}
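/*
 * Hypothetical caller sketch (not from this source): the trailing "error"
 * argument is the errno the helper returns when iovcnt is out of range, so
 * callers pick between e.g. EINVAL and EMSGSIZE.  The converted iovec array
 * is a single M_IOV allocation the caller must free.
 */
#if 0	/* illustrative only */
static int
example_copyiniov_caller(struct l_iovec32 *uiov, l_ulong cnt)
{
	struct iovec *iov;
	int error;

	error = linux32_copyiniov(uiov, cnt, &iov, EINVAL);
	if (error != 0)
		return (error);
	/* ... hand iov/cnt to a kern_*v()-style routine here ... */
	free(iov, M_IOV);
	return (0);
}
#endif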
static int
linux_accept_common(struct thread *td, int s, l_uintptr_t addr,
    l_uintptr_t namelen, int flags)
{
	struct accept_args /* {
		int	s;
		struct sockaddr * __restrict name;
		socklen_t * __restrict anamelen;
	} */ bsd_args;
	int error;

	if (flags & ~(LINUX_SOCK_CLOEXEC | LINUX_SOCK_NONBLOCK))
		return (EINVAL);

	bsd_args.s = s;
	/* XXX: */
	bsd_args.name = (struct sockaddr * __restrict)PTRIN(addr);
	bsd_args.anamelen = PTRIN(namelen);	/* XXX */
	error = accept(td, &bsd_args);
	bsd_to_linux_sockaddr((struct sockaddr *)bsd_args.name);
	if (error) {
		if (error == EFAULT && namelen != sizeof(struct sockaddr_in))
			return (EINVAL);
		return (error);
	}

	/*
	 * linux appears not to copy flags from the parent socket to the
	 * accepted one, so we must clear the flags in the new descriptor
	 * and apply the requested flags.
	 */
	error = kern_fcntl(td, td->td_retval[0], F_SETFL, 0);
	if (error)
		goto out;
	error = linux_set_socket_flags(td, td->td_retval[0], flags);
	if (error)
		goto out;
	if (addr)
		error = linux_sa_put(PTRIN(addr));

out:
	if (error) {
		(void)kern_close(td, td->td_retval[0]);
		td->td_retval[0] = 0;
	}
	return (error);
}
/*
 * Updated sendto() when IP_HDRINCL is set:
 * tweak endian-dependent fields in the IP packet.
 */
static int
linux_sendto_hdrincl(struct thread *td, struct linux_sendto_args *linux_args)
{
/*
 * linux_ip_copysize defines how many bytes we should copy
 * from the beginning of the IP packet before we customize it for BSD.
 * It should include all the fields we modify (ip_len and ip_off).
 */
#define linux_ip_copysize	8
	struct ip *packet;
	struct msghdr msg;
	struct iovec aiov[1];
	int error;

	/* Check that the packet isn't too big or too small. */
	if (linux_args->len < linux_ip_copysize ||
	    linux_args->len > IP_MAXPACKET)
		return (EINVAL);

	packet = (struct ip *)malloc(linux_args->len, M_TEMP, M_WAITOK);

	/* Make kernel copy of the packet to be sent */
	if ((error = copyin(PTRIN(linux_args->msg), packet,
	    linux_args->len)))
		goto goout;

	/* Convert fields from Linux to BSD raw IP socket format */
	packet->ip_len = linux_args->len;
	packet->ip_off = ntohs(packet->ip_off);

	/* Prepare the msghdr and iovec structures describing the new packet */
	msg.msg_name = PTRIN(linux_args->to);
	msg.msg_namelen = linux_args->tolen;
	msg.msg_iov = aiov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_flags = 0;
	aiov[0].iov_base = (char *)packet;
	aiov[0].iov_len = linux_args->len;
	error = linux_sendit(td, linux_args->s, &msg, linux_args->flags,
	    NULL, UIO_SYSSPACE);
goout:
	free(packet, M_TEMP);
	return (error);
}
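/*
 * Byte-order note for the conversion above (a summary of the reasoning, not
 * code from this source): with IP_HDRINCL, Linux expects ip_len and ip_off
 * in network order, while the classic BSD raw IP socket expects them in host
 * order.  That is why ip_len is assigned the host-order length directly and
 * ip_off gets an ntohs() before the packet is handed to linux_sendit().
 */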
static void
linux_to_bsd_semid_ds(struct l_semid_ds *lsp, struct semid_ds *bsp)
{

	linux_to_bsd_ipc_perm(&lsp->sem_perm, &bsp->sem_perm);
	bsp->sem_otime = lsp->sem_otime;
	bsp->sem_ctime = lsp->sem_ctime;
	bsp->sem_nsems = lsp->sem_nsems;
	bsp->sem_base = PTRIN(lsp->sem_base);
}
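/*
 * The reverse conversion follows the same field-by-field pattern; a sketch
 * (the real bsd_to_linux_semid_ds() lives elsewhere in the compat code and
 * may differ in detail), narrowing the kernel pointer with PTROUT():
 */
#if 0	/* illustrative only */
static void
example_bsd_to_linux_semid_ds(struct semid_ds *bsp, struct l_semid_ds *lsp)
{

	bsd_to_linux_ipc_perm(&bsp->sem_perm, &lsp->sem_perm);
	lsp->sem_otime = bsp->sem_otime;
	lsp->sem_ctime = bsp->sem_ctime;
	lsp->sem_nsems = bsp->sem_nsems;
	lsp->sem_base = PTROUT(bsp->sem_base);
}
#endif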
int
linux_sysctl(struct thread *td, struct linux_sysctl_args *args)
{
	struct l___sysctl_args la;
	struct sbuf *sb;
	l_int *mib;
	int error, i;

	error = copyin(args->args, &la, sizeof(la));
	if (error)
		return (error);

	if (la.nlen <= 0 || la.nlen > LINUX_CTL_MAXNAME)
		return (ENOTDIR);

	mib = malloc(la.nlen * sizeof(l_int), M_TEMP, M_WAITOK);
	error = copyin(PTRIN(la.name), mib, la.nlen * sizeof(l_int));
	if (error) {
		free(mib, M_TEMP);
		return (error);
	}

	switch (mib[0]) {
	case LINUX_CTL_KERN:
		if (la.nlen < 2)
			break;

		switch (mib[1]) {
		case LINUX_KERN_VERSION:
			error = handle_string(&la, version);
			free(mib, M_TEMP);
			return (error);
		default:
			break;
		}
		break;
	default:
		break;
	}

	sb = sbuf_new(NULL, NULL, 20 + la.nlen * 5, SBUF_AUTOEXTEND);
	if (sb == NULL) {
		linux_msg(td, "sysctl is not implemented");
	} else {
		sbuf_printf(sb, "sysctl ");
		for (i = 0; i < la.nlen; i++)
			sbuf_printf(sb, "%c%d", (i) ? ',' : '{', mib[i]);
		sbuf_printf(sb, "} is not implemented");
		sbuf_finish(sb);
		linux_msg(td, "%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	free(mib, M_TEMP);
	return (ENOTDIR);
}
static int
handle_string(struct l___sysctl_args *la, char *value)
{
	int error;

	if (la->oldval != 0) {
		l_int len = strlen(value);
		error = copyout(value, PTRIN(la->oldval), len + 1);
		if (!error && la->oldlenp != 0)
			error = copyout(&len, PTRIN(la->oldlenp), sizeof(len));
		if (error)
			return (error);
	}

	if (la->newval != 0)
		return (ENOTDIR);

	return (0);
}
static int
handle_string(struct l___sysctl_args *la, char *value)
{
	int error;

	LIN_SDT_PROBE2(sysctl, handle_string, entry, la, value);

	if (la->oldval != 0) {
		l_int len = strlen(value);
		error = copyout(value, PTRIN(la->oldval), len + 1);
		if (!error && la->oldlenp != 0)
			error = copyout(&len, PTRIN(la->oldlenp), sizeof(len));
		if (error) {
			LIN_SDT_PROBE1(sysctl, handle_string, copyout_error,
			    error);
			LIN_SDT_PROBE1(sysctl, handle_string, return, error);
			return (error);
		}
	}

	/* Writing a new value is not supported, as in the variant above. */
	if (la->newval != 0) {
		LIN_SDT_PROBE1(sysctl, handle_string, return, ENOTDIR);
		return (ENOTDIR);
	}

	LIN_SDT_PROBE1(sysctl, handle_string, return, 0);
	return (0);
}
int
linux_shmdt(struct thread *td, struct linux_shmdt_args *args)
{
	struct shmdt_args /* {
		void *shmaddr;
	} */ bsd_args;

	bsd_args.shmaddr = PTRIN(args->shmaddr);
	return shmdt(td, &bsd_args);
}
int
linux_semop(struct thread *td, struct linux_semop_args *args)
{
	struct semop_args /* {
		int	semid;
		struct	sembuf *sops;
		int	nsops;
	} */ bsd_args;

	bsd_args.semid = args->semid;
	bsd_args.sops = PTRIN(args->tsops);
	bsd_args.nsops = args->nsops;
	return semop(td, &bsd_args);
}
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
static int
ia32_set_mcontext(struct thread *td, const struct ia32_mcontext *mcp)
{
	struct trapframe *tp;
	char *xfpustate;
	long rflags;
	int ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	rflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_IA32_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin(PTRIN(mcp->mc_xfpustate), xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		xfpustate = NULL;
	ret = ia32_set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_gs = mcp->mc_gs;
	tp->tf_fs = mcp->mc_fs;
	tp->tf_es = mcp->mc_es;
	tp->tf_ds = mcp->mc_ds;
	tp->tf_flags = TF_HASSEGS;
	tp->tf_rdi = mcp->mc_edi;
	tp->tf_rsi = mcp->mc_esi;
	tp->tf_rbp = mcp->mc_ebp;
	tp->tf_rbx = mcp->mc_ebx;
	tp->tf_rdx = mcp->mc_edx;
	tp->tf_rcx = mcp->mc_ecx;
	tp->tf_rax = mcp->mc_eax;
	/* trapno, err */
	tp->tf_rip = mcp->mc_eip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_esp;
	tp->tf_ss = mcp->mc_ss;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	return (0);
}
static int
linux_bind(struct thread *td, struct linux_bind_args *args)
{
	struct sockaddr *sa;
	int error;

	error = linux_getsockaddr(&sa, PTRIN(args->name),
	    args->namelen);
	if (error)
		return (error);

	error = kern_bind(td, args->s, sa);
	free(sa, M_SONAME);
	if (error == EADDRNOTAVAIL &&
	    args->namelen != sizeof(struct sockaddr_in))
		return (EINVAL);
	return (error);
}
int
linux_writev(struct thread *td, struct linux_writev_args *uap)
{
	int error, i, nsize, osize;
	caddr_t sg;
	struct writev_args /* {
		syscallarg(int) fd;
		syscallarg(struct iovec *) iovp;
		syscallarg(u_int) iovcnt;
	} */ a;
	struct iovec32 *oio;
	struct iovec *nio;

	sg = stackgap_init();

	if (uap->iovcnt > (STACKGAPLEN / sizeof (struct iovec)))
		return (EINVAL);

	osize = uap->iovcnt * sizeof (struct iovec32);
	nsize = uap->iovcnt * sizeof (struct iovec);

	oio = malloc(osize, M_TEMP, M_WAITOK);
	nio = malloc(nsize, M_TEMP, M_WAITOK);

	error = 0;
	if ((error = copyin(uap->iovp, oio, osize)))
		goto punt;
	for (i = 0; i < uap->iovcnt; i++) {
		nio[i].iov_base = PTRIN(oio[i].iov_base);
		nio[i].iov_len = oio[i].iov_len;
	}

	a.fd = uap->fd;
	a.iovp = stackgap_alloc(&sg, nsize);
	a.iovcnt = uap->iovcnt;

	if ((error = copyout(nio, (caddr_t)a.iovp, nsize)))
		goto punt;
	error = writev(td, &a);

punt:
	free(oio, M_TEMP);
	free(nio, M_TEMP);
	return (error);
}
int
linux_msgrcv(struct thread *td, struct linux_msgrcv_args *args)
{
	void *msgp;
	long mtype;
	l_long lmtype;
	int error;

	if ((l_long)args->msgsz < 0 || args->msgsz > (l_long)msginfo.msgmax)
		return (EINVAL);
	msgp = PTRIN(args->msgp);
	if ((error = kern_msgrcv(td, args->msqid,
	    (char *)msgp + sizeof(lmtype), args->msgsz,
	    args->msgtyp, args->msgflg, &mtype)) != 0)
		return (error);
	lmtype = (l_long)mtype;
	return (copyout(&lmtype, msgp, sizeof(lmtype)));
}
int
linux_msgsnd(struct thread *td, struct linux_msgsnd_args *args)
{
	const void *msgp;
	long mtype;
	l_long lmtype;
	int error;

	if ((l_long)args->msgsz < 0 || args->msgsz > (l_long)msginfo.msgmax)
		return (EINVAL);
	msgp = PTRIN(args->msgp);
	if ((error = copyin(msgp, &lmtype, sizeof(lmtype))) != 0)
		return (error);
	mtype = (long)lmtype;
	return (kern_msgsnd(td, args->msqid,
	    (const char *)msgp + sizeof(lmtype),
	    args->msgsz, args->msgflg, mtype));
}
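/*
 * The user buffer layout the two wrappers above assume matches the Linux
 * msgbuf ABI: an l_long type word immediately followed by the payload,
 * which is why sizeof(lmtype) is added to msgp before handing the text to
 * kern_msgsnd()/kern_msgrcv().  Sketch for illustration only:
 */
#if 0	/* illustrative only */
struct l_msgbuf {
	l_long	mtype;		/* message type, copied separately */
	char	mtext[1];	/* payload passed to the kern_msg* routines */
};
#endif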
int
linux_connect(struct thread *td, struct linux_connect_args *args)
{
	struct socket *so;
	struct sockaddr *sa;
	u_int fflag;
	int error;

	error = linux_getsockaddr(&sa, (struct osockaddr *)PTRIN(args->name),
	    args->namelen);
	if (error)
		return (error);

	error = kern_connect(td, args->s, sa);
	free(sa, M_SONAME);
	if (error != EISCONN)
		return (error);

	/*
	 * Linux doesn't return EISCONN the first time it occurs,
	 * when on a non-blocking socket. Instead it returns the
	 * error getsockopt(SOL_SOCKET, SO_ERROR) would return on BSD.
	 *
	 * XXXRW: Instead of using fgetsock(), check that it is a
	 * socket and use the file descriptor reference instead of
	 * creating a new one.
	 */
	error = fgetsock(td, args->s, &so, &fflag);
	if (error == 0) {
		error = EISCONN;
		if (fflag & FNONBLOCK) {
			SOCK_LOCK(so);
			if (so->so_emuldata == 0)
				error = so->so_error;
			so->so_emuldata = (void *)1;
			SOCK_UNLOCK(so);
		}
		fputsock(so);
	}
	return (error);
}
static int
linux32_copyinuio(struct l_iovec32 *iovp, l_ulong iovcnt, struct uio **uiop)
{
	struct l_iovec32 iov32;
	struct iovec *iov;
	struct uio *uio;
	uint32_t iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof(struct iovec);
	uio = malloc(iovlen + sizeof(*uio), M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	for (i = 0; i < iovcnt; i++) {
		error = copyin(&iovp[i], &iov32, sizeof(struct l_iovec32));
		if (error) {
			free(uio, M_IOV);
			return (error);
		}
		iov[i].iov_base = PTRIN(iov32.iov_base);
		iov[i].iov_len = iov32.iov_len;
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > INT_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}
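/*
 * Usage sketch (hypothetical caller, not from this source): the uio header
 * and its iovec array live in one M_IOV allocation (note the single malloc
 * above), so one free() releases both once the I/O routine has consumed
 * the uio.
 */
#if 0	/* illustrative only */
static int
example_copyinuio_caller(struct l_iovec32 *uiov, l_ulong cnt)
{
	struct uio *auio;
	int error;

	error = linux32_copyinuio(uiov, cnt, &auio);
	if (error != 0)
		return (error);
	/* ... pass auio to a kern_readv()-style consumer here ... */
	free(auio, M_IOV);
	return (error);
}
#endif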
static void
linux_to_bsd_sigaction(l_sigaction_t *lsa, struct sigaction *bsa)
{

	linux_to_bsd_sigset(&lsa->lsa_mask, &bsa->sa_mask);
	bsa->sa_handler = PTRIN(lsa->lsa_handler);
	bsa->sa_flags = 0;
	if (lsa->lsa_flags & LINUX_SA_NOCLDSTOP)
		bsa->sa_flags |= SA_NOCLDSTOP;
	if (lsa->lsa_flags & LINUX_SA_NOCLDWAIT)
		bsa->sa_flags |= SA_NOCLDWAIT;
	if (lsa->lsa_flags & LINUX_SA_SIGINFO)
		bsa->sa_flags |= SA_SIGINFO;
	if (lsa->lsa_flags & LINUX_SA_ONSTACK)
		bsa->sa_flags |= SA_ONSTACK;
	if (lsa->lsa_flags & LINUX_SA_RESTART)
		bsa->sa_flags |= SA_RESTART;
	if (lsa->lsa_flags & LINUX_SA_ONESHOT)
		bsa->sa_flags |= SA_RESETHAND;
	if (lsa->lsa_flags & LINUX_SA_NOMASK)
		bsa->sa_flags |= SA_NODEFER;
}
int
linux_semctl(struct thread *td, struct linux_semctl_args *args)
{
	struct l_semid_ds linux_semid;
	struct l_seminfo linux_seminfo;
	struct semid_ds semid;
	union semun semun;
	register_t rval;
	int cmd, error;

	switch (args->cmd & ~LINUX_IPC_64) {
	case LINUX_IPC_RMID:
		cmd = IPC_RMID;
		break;
	case LINUX_GETNCNT:
		cmd = GETNCNT;
		break;
	case LINUX_GETPID:
		cmd = GETPID;
		break;
	case LINUX_GETVAL:
		cmd = GETVAL;
		break;
	case LINUX_GETZCNT:
		cmd = GETZCNT;
		break;
	case LINUX_SETVAL:
		cmd = SETVAL;
		semun.val = args->arg.val;
		break;
	case LINUX_IPC_SET:
		cmd = IPC_SET;
		error = linux_semid_pullup(args->cmd & LINUX_IPC_64,
		    &linux_semid, PTRIN(args->arg.buf));
		if (error)
			return (error);
		linux_to_bsd_semid_ds(&linux_semid, &semid);
		semun.buf = &semid;
		return (kern_semctl(td, args->semid, args->semnum, cmd,
		    &semun, td->td_retval));
	case LINUX_IPC_STAT:
	case LINUX_SEM_STAT:
		if ((args->cmd & ~LINUX_IPC_64) == LINUX_IPC_STAT)
			cmd = IPC_STAT;
		else
			cmd = SEM_STAT;
		semun.buf = &semid;
		error = kern_semctl(td, args->semid, args->semnum, cmd,
		    &semun, &rval);
		if (error)
			return (error);
		bsd_to_linux_semid_ds(&semid, &linux_semid);
		error = linux_semid_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_semid, PTRIN(args->arg.buf));
		if (error == 0)
			td->td_retval[0] = (cmd == SEM_STAT) ? rval : 0;
		return (error);
	case LINUX_IPC_INFO:
	case LINUX_SEM_INFO:
		bcopy(&seminfo, &linux_seminfo.semmni, sizeof(linux_seminfo) -
		    sizeof(linux_seminfo.semmap));
		/*
		 * Linux does not use the semmap field but populates it with
		 * the defined value from SEMMAP, which really is redefined to
		 * SEMMNS, which they define as SEMMNI * SEMMSL.  Try to
		 * simulate this returning our dynamic semmns value.
		 */
		linux_seminfo.semmap = linux_seminfo.semmns;
/* XXX BSD equivalent?
#define used_semids 10
#define used_sems 10
		linux_seminfo.semusz = used_semids;
		linux_seminfo.semaem = used_sems;
*/
		error = copyout(&linux_seminfo,
		    PTRIN(args->arg.buf), sizeof(linux_seminfo));
		if (error)
			return (error);
		td->td_retval[0] = seminfo.semmni;
		return (0);		/* No need for __semctl call */
	case LINUX_GETALL:
		cmd = GETALL;
		semun.val = args->arg.val;
		break;
	case LINUX_SETALL:
		cmd = SETALL;
		semun.val = args->arg.val;
		break;
	default:
		linux_msg(td, "ipc type %d is not implemented",
		    args->cmd & ~LINUX_IPC_64);
		return (EINVAL);
	}
	return (kern_semctl(td, args->semid, args->semnum, cmd, &semun,
	    td->td_retval));
}
/*ARGSUSED*/
static int
ipmi_ioctl(dev_t dv, int cmd, intptr_t data, int flags, cred_t *cr, int *rvalp)
{
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req req;
	struct ipmi_recv recv;
	struct ipmi_recv32 recv32;
	struct ipmi_addr addr;
	int error, len;
	model_t model;
	int orig_cmd = 0;
	uchar_t t_lun;

	if (secpolicy_sys_config(cr, B_FALSE) != 0)
		return (EPERM);

	if ((dev = lookup_ipmidev_by_dev(dv)) == NULL)
		return (ENODEV);

	model = get_udatamodel();
	if (model == DATAMODEL_NATIVE) {
		switch (cmd) {
		case IPMICTL_SEND_COMMAND:
			if (copyin((void *)data, &req, sizeof (req)))
				return (EFAULT);
			break;
		case IPMICTL_RECEIVE_MSG_TRUNC:
		case IPMICTL_RECEIVE_MSG:
			if (copyin((void *)data, &recv, sizeof (recv)))
				return (EFAULT);
			break;
		}
	} else {
		/* Convert 32-bit structures to native. */
		struct ipmi_req32 req32;

		switch (cmd) {
		case IPMICTL_SEND_COMMAND_32:
			if (copyin((void *)data, &req32, sizeof (req32)))
				return (EFAULT);

			req.addr = PTRIN(req32.addr);
			req.addr_len = req32.addr_len;
			req.msgid = req32.msgid;
			req.msg.netfn = req32.msg.netfn;
			req.msg.cmd = req32.msg.cmd;
			req.msg.data_len = req32.msg.data_len;
			req.msg.data = PTRIN(req32.msg.data);
			cmd = IPMICTL_SEND_COMMAND;
			break;
		case IPMICTL_RECEIVE_MSG_TRUNC_32:
		case IPMICTL_RECEIVE_MSG_32:
			if (copyin((void *)data, &recv32, sizeof (recv32)))
				return (EFAULT);

			recv.addr = PTRIN(recv32.addr);
			recv.addr_len = recv32.addr_len;
			recv.msg.data_len = recv32.msg.data_len;
			recv.msg.data = PTRIN(recv32.msg.data);
			orig_cmd = cmd;
			cmd = (cmd == IPMICTL_RECEIVE_MSG_TRUNC_32) ?
			    IPMICTL_RECEIVE_MSG_TRUNC : IPMICTL_RECEIVE_MSG;
			break;
		}
	}

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
		IPMI_LOCK(sc);
		/* clear out old stuff in queue of stuff done */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))
		    != NULL) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		/* Check that we didn't get a ridiculous length */
		if (req.msg.data_len > IPMI_MAX_RX)
			return (EINVAL);

		kreq = ipmi_alloc_request(dev, req.msgid,
		    IPMI_ADDR(req.msg.netfn, 0), req.msg.cmd,
		    req.msg.data_len, IPMI_MAX_RX);
		/* This struct is the same for 32/64 */
		if (req.msg.data_len > 0 &&
		    copyin(req.msg.data, kreq->ir_request, req.msg.data_len)) {
			ipmi_free_request(kreq);
			return (EFAULT);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		/* This struct is the same for 32/64 */
		if (copyin(recv.addr, &addr, sizeof (addr)))
			return (EFAULT);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		recv.recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv.msgid = kreq->ir_msgid;
		recv.msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv.msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		len = kreq->ir_replylen + 1;
		if (recv.msg.data_len < len && cmd == IPMICTL_RECEIVE_MSG) {
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);

		len = min(recv.msg.data_len, len);
		recv.msg.data_len = (unsigned short)len;

		if (orig_cmd == IPMICTL_RECEIVE_MSG_TRUNC_32 ||
		    orig_cmd == IPMICTL_RECEIVE_MSG_32) {
			/* Update changed fields in 32-bit structure. */
			recv32.recv_type = recv.recv_type;
			recv32.msgid = (int32_t)recv.msgid;
			recv32.msg.netfn = recv.msg.netfn;
			recv32.msg.cmd = recv.msg.cmd;
			recv32.msg.data_len = recv.msg.data_len;

			error = copyout(&recv32, (void *)data,
			    sizeof (recv32));
		} else {
			error = copyout(&recv, (void *)data, sizeof (recv));
		}
		/* This struct is the same for 32/64 */
		if (error == 0)
			error = copyout(&addr, recv.addr, sizeof (addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv.msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv.msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);

		if (error)
			return (EFAULT);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &dev->ipmi_address,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_address, (void *)data,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &t_lun, sizeof (t_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		dev->ipmi_lun = t_lun & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_lun, (void *)data,
		    sizeof (dev->ipmi_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EINVAL);
	default:
		return (EINVAL);
	}

	return (0);
}
static int
freebsd32_ioctl_pciocgetconf(struct thread *td,
    struct freebsd32_ioctl_args *uap, struct file *fp)
{
	struct pci_conf_io pci;
	struct pci_conf_io32 pci32;
	struct pci_match_conf32 pmc32;
	struct pci_match_conf32 *pmc32p;
	struct pci_match_conf pmc;
	struct pci_match_conf *pmcp;
	struct pci_conf32 pc32;
	struct pci_conf32 *pc32p;
	struct pci_conf pc;
	struct pci_conf *pcp;
	u_int32_t i;
	u_int32_t npat_to_convert;
	u_int32_t nmatch_to_convert;
	vm_offset_t addr;
	int error;

	if ((error = copyin(uap->data, &pci32, sizeof(pci32))) != 0)
		return (error);

	CP(pci32, pci, num_patterns);
	CP(pci32, pci, offset);
	CP(pci32, pci, generation);

	npat_to_convert = pci32.pat_buf_len / sizeof(struct pci_match_conf32);
	pci.pat_buf_len = npat_to_convert * sizeof(struct pci_match_conf);
	pci.patterns = NULL;
	nmatch_to_convert = pci32.match_buf_len / sizeof(struct pci_conf32);
	pci.match_buf_len = nmatch_to_convert * sizeof(struct pci_conf);
	pci.matches = NULL;

	if ((error = copyout_map(td, &addr, pci.pat_buf_len)) != 0)
		goto cleanup;
	pci.patterns = (struct pci_match_conf *)addr;
	if ((error = copyout_map(td, &addr, pci.match_buf_len)) != 0)
		goto cleanup;
	pci.matches = (struct pci_conf *)addr;

	npat_to_convert = min(npat_to_convert, pci.num_patterns);

	for (i = 0, pmc32p = (struct pci_match_conf32 *)PTRIN(pci32.patterns),
	    pmcp = pci.patterns;
	    i < npat_to_convert; i++, pmc32p++, pmcp++) {
		if ((error = copyin(pmc32p, &pmc32, sizeof(pmc32))) != 0)
			goto cleanup;
		CP(pmc32, pmc, pc_sel);
		strlcpy(pmc.pd_name, pmc32.pd_name, sizeof(pmc.pd_name));
		CP(pmc32, pmc, pd_unit);
		CP(pmc32, pmc, pc_vendor);
		CP(pmc32, pmc, pc_device);
		CP(pmc32, pmc, pc_class);
		CP(pmc32, pmc, flags);
		if ((error = copyout(&pmc, pmcp, sizeof(pmc))) != 0)
			goto cleanup;
	}

	if ((error = fo_ioctl(fp, PCIOCGETCONF, (caddr_t)&pci,
	    td->td_ucred, td)) != 0)
		goto cleanup;

	nmatch_to_convert = min(nmatch_to_convert, pci.num_matches);

	for (i = 0, pcp = pci.matches,
	    pc32p = (struct pci_conf32 *)PTRIN(pci32.matches);
	    i < nmatch_to_convert; i++, pcp++, pc32p++) {
		if ((error = copyin(pcp, &pc, sizeof(pc))) != 0)
			goto cleanup;
		CP(pc, pc32, pc_sel);
		CP(pc, pc32, pc_hdr);
		CP(pc, pc32, pc_subvendor);
		CP(pc, pc32, pc_subdevice);
		CP(pc, pc32, pc_vendor);
		CP(pc, pc32, pc_device);
		CP(pc, pc32, pc_class);
		CP(pc, pc32, pc_subclass);
		CP(pc, pc32, pc_progif);
		CP(pc, pc32, pc_revid);
		strlcpy(pc32.pd_name, pc.pd_name, sizeof(pc32.pd_name));
		CP(pc, pc32, pd_unit);
		if ((error = copyout(&pc32, pc32p, sizeof(pc32))) != 0)
			goto cleanup;
	}

	CP(pci, pci32, num_matches);
	CP(pci, pci32, offset);
	CP(pci, pci32, generation);
	CP(pci, pci32, status);

	error = copyout(&pci32, uap->data, sizeof(pci32));

cleanup:
	if (pci.patterns)
		copyout_unmap(td, (vm_offset_t)pci.patterns, pci.pat_buf_len);
	if (pci.matches)
		copyout_unmap(td, (vm_offset_t)pci.matches, pci.match_buf_len);

	return (error);
}
int
linux_shmctl(struct thread *td, struct linux_shmctl_args *args)
{
	struct l_shmid_ds linux_shmid;
	struct l_shminfo linux_shminfo;
	struct l_shm_info linux_shm_info;
	struct shmid_ds bsd_shmid;
	int error;

	switch (args->cmd & ~LINUX_IPC_64) {

	case LINUX_IPC_INFO: {
		struct shminfo bsd_shminfo;

		/* Perform shmctl wanting removed segments lookup */
		error = kern_shmctl(td, args->shmid, IPC_INFO,
		    (void *)&bsd_shminfo, NULL);
		if (error)
			return error;

		bsd_to_linux_shminfo(&bsd_shminfo, &linux_shminfo);

		return (linux_shminfo_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_shminfo, PTRIN(args->buf)));
	}

	case LINUX_SHM_INFO: {
		struct shm_info bsd_shm_info;

		/* Perform shmctl wanting removed segments lookup */
		error = kern_shmctl(td, args->shmid, SHM_INFO,
		    (void *)&bsd_shm_info, NULL);
		if (error)
			return error;

		bsd_to_linux_shm_info(&bsd_shm_info, &linux_shm_info);

		return copyout(&linux_shm_info, PTRIN(args->buf),
		    sizeof(struct l_shm_info));
	}

	case LINUX_IPC_STAT:
		/* Perform shmctl wanting removed segments lookup */
		error = kern_shmctl(td, args->shmid, IPC_STAT,
		    (void *)&bsd_shmid, NULL);
		if (error)
			return error;

		bsd_to_linux_shmid_ds(&bsd_shmid, &linux_shmid);

		return (linux_shmid_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_shmid, PTRIN(args->buf)));

	case LINUX_SHM_STAT:
		/* Perform shmctl wanting removed segments lookup */
		error = kern_shmctl(td, args->shmid, IPC_STAT,
		    (void *)&bsd_shmid, NULL);
		if (error)
			return error;

		bsd_to_linux_shmid_ds(&bsd_shmid, &linux_shmid);

		return (linux_shmid_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_shmid, PTRIN(args->buf)));

	case LINUX_IPC_SET:
		error = linux_shmid_pullup(args->cmd & LINUX_IPC_64,
		    &linux_shmid, PTRIN(args->buf));
		if (error)
			return error;

		linux_to_bsd_shmid_ds(&linux_shmid, &bsd_shmid);

		/* Perform shmctl wanting removed segments lookup */
		return kern_shmctl(td, args->shmid, IPC_SET,
		    (void *)&bsd_shmid, NULL);

	case LINUX_IPC_RMID: {
		void *buf;

		if (args->buf == 0)
			buf = NULL;
		else {
			error = linux_shmid_pullup(args->cmd & LINUX_IPC_64,
			    &linux_shmid, PTRIN(args->buf));
			if (error)
				return error;
			linux_to_bsd_shmid_ds(&linux_shmid, &bsd_shmid);
			buf = (void *)&bsd_shmid;
		}
		return kern_shmctl(td, args->shmid, IPC_RMID, buf, NULL);
	}

	case LINUX_SHM_LOCK:
	case LINUX_SHM_UNLOCK:
	default:
		linux_msg(td, "ipc typ=%d not implemented",
		    args->cmd & ~LINUX_IPC_64);
		return EINVAL;
	}
}
void
ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct ia32_sigframe sf, *sfp;
	struct siginfo32 siginfo;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	char *xfpusave;
	size_t xfpusave_len;
	int oonstack;
	int sig;

	siginfo_to_siginfo32(&ksi->ksi_info, &siginfo);
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = siginfo.si_signo;
	psp = p->p_sigacts;
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_ia32_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		ia32_osendsig(catcher, ksi, mask);
		return;
	}
#endif
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

	if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
	sf.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_edi = regs->tf_rdi;
	sf.sf_uc.uc_mcontext.mc_esi = regs->tf_rsi;
	sf.sf_uc.uc_mcontext.mc_ebp = regs->tf_rbp;
	sf.sf_uc.uc_mcontext.mc_isp = regs->tf_rsp; /* XXX */
	sf.sf_uc.uc_mcontext.mc_ebx = regs->tf_rbx;
	sf.sf_uc.uc_mcontext.mc_edx = regs->tf_rdx;
	sf.sf_uc.uc_mcontext.mc_ecx = regs->tf_rcx;
	sf.sf_uc.uc_mcontext.mc_eax = regs->tf_rax;
	sf.sf_uc.uc_mcontext.mc_trapno = regs->tf_trapno;
	sf.sf_uc.uc_mcontext.mc_err = regs->tf_err;
	sf.sf_uc.uc_mcontext.mc_eip = regs->tf_rip;
	sf.sf_uc.uc_mcontext.mc_cs = regs->tf_cs;
	sf.sf_uc.uc_mcontext.mc_eflags = regs->tf_rflags;
	sf.sf_uc.uc_mcontext.mc_esp = regs->tf_rsp;
	sf.sf_uc.uc_mcontext.mc_ss = regs->tf_ss;
	sf.sf_uc.uc_mcontext.mc_ds = regs->tf_ds;
	sf.sf_uc.uc_mcontext.mc_es = regs->tf_es;
	sf.sf_uc.uc_mcontext.mc_fs = regs->tf_fs;
	sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs;
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	ia32_get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
	sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig))
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
	else
		sp = (char *)regs->tf_rsp;
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
		sp = (char *)((unsigned long)sp & ~0x3Ful);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(sf);
	/* Align to 16 bytes. */
	sfp = (struct ia32_sigframe *)((uintptr_t)sp & ~0xF);
	PROC_UNLOCK(p);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (u_int32_t)(uintptr_t)&sfp->sf_si;
		sf.sf_ah = (u_int32_t)(uintptr_t)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = siginfo;
		sf.sf_si.si_signo = sig;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = siginfo.si_code;
		sf.sf_addr = (u_int32_t)siginfo.si_addr;
		sf.sf_ah = (u_int32_t)(uintptr_t)catcher;
	}
	mtx_unlock(&psp->ps_mtx);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    PTRIN(sf.sf_uc.uc_mcontext.mc_xfpustate), xfpusave_len)
	    != 0)) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_rsp = (uintptr_t)sfp;
	regs->tf_rip = p->p_sysent->sv_sigcode_base;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucode32sel;
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	/* XXXKIB leave user %fs and %gs untouched */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
int
linux_semctl(struct thread *td, struct linux_semctl_args *args)
{
	struct l_semid_ds linux_semid;
	struct l_seminfo linux_seminfo;
	struct semid_ds semid;
	union semun semun;
	register_t rval;
	int cmd, error;

	switch (args->cmd & ~LINUX_IPC_64) {
	case LINUX_IPC_RMID:
		cmd = IPC_RMID;
		break;
	case LINUX_GETNCNT:
		cmd = GETNCNT;
		break;
	case LINUX_GETPID:
		cmd = GETPID;
		break;
	case LINUX_GETVAL:
		cmd = GETVAL;
		break;
	case LINUX_GETZCNT:
		cmd = GETZCNT;
		break;
	case LINUX_SETVAL:
		cmd = SETVAL;
		semun.val = args->arg.val;
		break;
	case LINUX_IPC_SET:
		cmd = IPC_SET;
		error = linux_semid_pullup(args->cmd & LINUX_IPC_64,
		    &linux_semid, PTRIN(args->arg.buf));
		if (error)
			return (error);
		linux_to_bsd_semid_ds(&linux_semid, &semid);
		semun.buf = &semid;
		return (kern_semctl(td, args->semid, args->semnum, cmd,
		    &semun, td->td_retval));
	case LINUX_IPC_STAT:
	case LINUX_SEM_STAT:
		if ((args->cmd & ~LINUX_IPC_64) == LINUX_IPC_STAT)
			cmd = IPC_STAT;
		else
			cmd = SEM_STAT;
		semun.buf = &semid;
		error = kern_semctl(td, args->semid, args->semnum, cmd,
		    &semun, &rval);
		if (error)
			return (error);
		bsd_to_linux_semid_ds(&semid, &linux_semid);
		error = linux_semid_pushdown(args->cmd & LINUX_IPC_64,
		    &linux_semid, PTRIN(args->arg.buf));
		if (error == 0)
			td->td_retval[0] = (cmd == SEM_STAT) ? rval : 0;
		return (error);
	case LINUX_IPC_INFO:
	case LINUX_SEM_INFO:
		bcopy(&seminfo, &linux_seminfo, sizeof(linux_seminfo));
/* XXX BSD equivalent?
#define used_semids 10
#define used_sems 10
		linux_seminfo.semusz = used_semids;
		linux_seminfo.semaem = used_sems;
*/
		error = copyout(&linux_seminfo,
		    PTRIN(args->arg.buf), sizeof(linux_seminfo));
		if (error)
			return error;
		td->td_retval[0] = seminfo.semmni;
		return 0;		/* No need for __semctl call */
	case LINUX_GETALL:
		cmd = GETALL;
		semun.val = args->arg.val;
		break;
	case LINUX_SETALL:
		cmd = SETALL;
		semun.val = args->arg.val;
		break;
	default:
		linux_msg(td, "ipc type %d is not implemented",
		    args->cmd & ~LINUX_IPC_64);
		return EINVAL;
	}
	return (kern_semctl(td, args->semid, args->semnum, cmd, &semun,
	    td->td_retval));
}
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;
	cap_rights_t rights;

	LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
	    args->addr, args->len, args->prot,
	    args->flags, args->fd, args->pgoff);

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (! ((args->flags & LINUX_MAP_SHARED) ^
	    (args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (args->flags & LINUX_MAP_GROWSDOWN)
		bsd_args.flags |= MAP_STACK;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386.  We do this to ensure maximum compatibility.
	 * Linux/ia64 does the same in i386 emulation mode.
	 */
	bsd_args.prot = args->prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;

	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : args->fd;
	if (bsd_args.fd != -1) {
		/*
		 * Linux follows Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 */

		error = fget(td, bsd_args.fd,
		    cap_rights_init(&rights, CAP_MMAP), &fp);
		if (error != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (!(fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		fdrop(fp, td);
	}

	if (args->flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads has an implicit internal
		 * limit to stack size of STACK_SIZE.  Its just not
		 * enforced explicitly in Linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and auto grows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */
		if ((caddr_t)PTRIN(args->addr) + args->len >
		    p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur_proc(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/*
		 * This gives us our maximum stack size and a new BOS.
		 * If we're using VM_STACK, then mmap will just map
		 * the top SGROWSIZ bytes, and let the stack grow down
		 * to the limit at BOS.  If we're not using VM_STACK
		 * we map the full stack, since we don't have a way
		 * to autogrow it.
		 */
		if (args->len > STACK_SIZE - GUARD_SIZE) {
			bsd_args.addr = (caddr_t)PTRIN(args->addr);
			bsd_args.len = args->len;
		} else {
			bsd_args.addr = (caddr_t)PTRIN(args->addr) -
			    (STACK_SIZE - GUARD_SIZE - args->len);
			bsd_args.len = STACK_SIZE - GUARD_SIZE;
		}
	} else {
		bsd_args.addr = (caddr_t)PTRIN(args->addr);
		bsd_args.len = args->len;
	}
	bsd_args.pos = (off_t)args->pgoff;

	error = sys_mmap(td, &bsd_args);

	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
	return (error);
}
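/*
 * Worked example for the MAP_GROWSDOWN arithmetic above (illustrative
 * numbers only, not values from this source): with STACK_SIZE = 8MB
 * (0x800000) and GUARD_SIZE = 16KB (0x4000), a Linux request of
 * addr = 0x40000000 and len = 64KB (0x10000) falls in the else branch:
 *
 *	bsd_args.addr = 0x40000000 - (0x800000 - 0x4000 - 0x10000)
 *	              = 0x40000000 - 0x7ec000 = 0x3f814000
 *	bsd_args.len  = 0x800000 - 0x4000 = 0x7fc000
 *
 * so MAP_STACK maps the top of the region and can auto-grow it down to
 * the new BOS at 0x3f814000.
 */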
/*
 * mrsas_passthru:	Handle pass-through commands
 * input:		Adapter instance soft state
 *			argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and
 * ioctl commands to Firmware.
 */
int
mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

#ifdef COMPAT_FREEBSD32
	struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;
#endif
	union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
	struct mrsas_mfi_cmd *cmd = NULL;
	bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
	bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
	void *ioctl_data_mem[MAX_IOCTL_SGE];
	bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
	bus_dma_tag_t ioctl_sense_tag = 0;
	bus_dmamap_t ioctl_sense_dmamap = 0;
	void *ioctl_sense_mem = 0;
	bus_addr_t ioctl_sense_phys_addr = 0;
	int i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
	struct mrsas_sge32 *kern_sge32;
	unsigned long *sense_ptr;
	uint8_t *iov_base_ptrin = NULL;
	size_t iov_len = 0;

	/*
	 * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In
	 * this case do nothing and return 0 to it as status.
	 */
	if (in_cmd->dcmd.opcode == 0) {
		device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
		user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
		return (0);
	}
	/* Validate SGL length */
	if (user_ioc->sge_count > MAX_IOCTL_SGE) {
		device_printf(sc->mrsas_dev,
		    "In %s() SGL is too long (%d > 8).\n",
		    __func__, user_ioc->sge_count);
		return (ENOENT);
	}
	/* Get a command */
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Failed to get a free cmd for IOCTL\n");
		return (ENOMEM);
	}
	/*
	 * User's IOCTL packet has 2 frames (maximum).  Copy those two frames
	 * into our cmd's frames.  cmd->frame's context will get overwritten
	 * when we copy from user's frames, so set that value alone
	 * separately.
	 */
	memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;
	cmd->frame->hdr.pad_0 = 0;
	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
	    MFI_FRAME_SENSE64);

	/*
	 * The management interface between applications and the fw uses MFI
	 * frames.  E.g, RAID configuration changes, LD property changes etc
	 * are accomplished through different kinds of MFI frames.  The driver
	 * needs to care only about substituting user buffers with kernel
	 * buffers in SGLs.  The location of SGL is embedded in the struct
	 * iocpacket itself.
	 */
	kern_sge32 = (struct mrsas_sge32 *)
	    ((unsigned long)cmd->frame + user_ioc->sgl_off);

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc32->sgl[i].iov_len;
#endif
		}
		if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
		    NULL, NULL, ioctl_data_size, 1, ioctl_data_size,
		    BUS_DMA_ALLOCNOW, NULL, NULL, &ioctl_data_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate ioctl data tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_data_tag[i],
		    (void **)&ioctl_data_mem[i],
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
		    ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
		    &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev,
			    "Cannot load ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		/* Save the physical address and length */
		kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];

		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			kern_sge32[i].length = user_ioc->sgl[i].iov_len;

			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			kern_sge32[i].length = user_ioc32->sgl[i].iov_len;

			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		/* Copy in data from user space */
		ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev,
			    "IOCTL copyin failed!\n");
			goto out;
		}
	}

	ioctl_sense_size = user_ioc->sense_len;

	if (user_ioc->sense_len) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
		    NULL, NULL, ioctl_sense_size, 1, ioctl_sense_size,
		    BUS_DMA_ALLOCNOW, NULL, NULL, &ioctl_sense_tag)) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate ioctl sense tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_sense_tag,
		    (void **)&ioctl_sense_mem,
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
		    ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
		    &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev,
			    "Cannot load ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		/*
		 * Store the sense buffer's DMA address in the frame's sense
		 * slot so the firmware writes sense data into our buffer.
		 */
		sense_ptr = (unsigned long *)((unsigned long)cmd->frame +
		    user_ioc->sense_off);
		*sense_ptr = (unsigned long)ioctl_sense_phys_addr;
	}
	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	mrsas_issue_blocked_cmd(sc, cmd);
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}
		ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev,
			    "IOCTL copyout failed!\n");
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (user_ioc->sense_len) {
		/*
		 * sense_buff points to the location that has the user
		 * sense buffer address
		 */
		sense_ptr = (unsigned long *)
		    ((unsigned long)user_ioc->frame.raw + user_ioc->sense_off);
		ret = copyout(ioctl_sense_mem, (unsigned long *)*sense_ptr,
		    user_ioc->sense_len);
		if (ret) {
			device_printf(sc->mrsas_dev,
			    "IOCTL sense copyout failed!\n");
			goto out;
		}
	}
	/*
	 * Return command status to user space
	 */
	memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
	    sizeof(u_int8_t));

out:
	/*
	 * Release sense buffer
	 */
	if (ioctl_sense_phys_addr)
		bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
	if (ioctl_sense_mem != NULL)
		bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem,
		    ioctl_sense_dmamap);
	if (ioctl_sense_tag != NULL)
		bus_dma_tag_destroy(ioctl_sense_tag);

	/*
	 * Release data buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
#endif
		}
		if (ioctl_data_phys_addr[i])
			bus_dmamap_unload(ioctl_data_tag[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_mem[i] != NULL)
			bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_tag[i] != NULL)
			bus_dma_tag_destroy(ioctl_data_tag[i]);
	}
	/* Free command */
	mrsas_release_mfi_cmd(cmd);

	return (ret);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by rt_sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
linux_rt_sigreturn(struct thread *td, struct linux_rt_sigreturn_args *args)
{
	struct l_ucontext uc;
	struct l_sigcontext *context;
	sigset_t bmask;
	l_stack_t *lss;
	stack_t ss;
	struct trapframe *regs;
	int eflags;
	ksiginfo_t ksi;

	regs = td->td_frame;

#ifdef DEBUG
	if (ldebug(rt_sigreturn))
		printf(ARGS(rt_sigreturn, "%p"), (void *)args->ucp);
#endif
	/*
	 * The trampoline code hands us the ucontext.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	if (copyin(args->ucp, &uc, sizeof(uc)) != 0)
		return (EFAULT);

	context = &uc.uc_mcontext;

	/*
	 * Check for security violations.
	 */
#define	EFLAGS_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
	eflags = context->sc_eflags;
	if (!EFLAGS_SECURE(eflags, regs->tf_rflags))
		return (EINVAL);

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
#define	CS_SECURE(cs)	(ISPL(cs) == SEL_UPL)
	if (!CS_SECURE(context->sc_cs)) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);
		return (EINVAL);
	}

	linux_to_bsd_sigset(&uc.uc_sigmask, &bmask);
	kern_sigprocmask(td, SIG_SETMASK, &bmask, NULL, 0);

	/*
	 * Restore signal context
	 */
	regs->tf_gs = context->sc_gs;
	regs->tf_fs = context->sc_fs;
	regs->tf_es = context->sc_es;
	regs->tf_ds = context->sc_ds;
	regs->tf_rdi = context->sc_edi;
	regs->tf_rsi = context->sc_esi;
	regs->tf_rbp = context->sc_ebp;
	regs->tf_rbx = context->sc_ebx;
	regs->tf_rdx = context->sc_edx;
	regs->tf_rcx = context->sc_ecx;
	regs->tf_rax = context->sc_eax;
	regs->tf_rip = context->sc_eip;
	regs->tf_cs = context->sc_cs;
	regs->tf_rflags = eflags;
	regs->tf_rsp = context->sc_esp_at_signal;
	regs->tf_ss = context->sc_ss;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);

	/*
	 * call sigaltstack & ignore results..
	 */
	lss = &uc.uc_stack;
	ss.ss_sp = PTRIN(lss->ss_sp);
	ss.ss_size = lss->ss_size;
	ss.ss_flags = linux_to_bsd_sigaltstack(lss->ss_flags);

#ifdef DEBUG
	if (ldebug(rt_sigreturn))
		printf(LMSG("rt_sigret flags: 0x%x, sp: %p, ss: 0x%lx, mask: 0x%x"),
		    ss.ss_flags, ss.ss_sp, ss.ss_size, context->sc_mask);
#endif
	(void)kern_sigaltstack(td, &ss, NULL);

	return (EJUSTRETURN);
}