/*
 * File ioctl entry point support.
 *
 * Additional work is needed in the ioctl path because both 32-bit and
 * 64-bit userland programs must be supported.  Userland data structures
 * are converted using ddi_model_convert_from().
 */
static int
quantis_copyin_uint(intptr_t arg, int flags, unsigned int *dst)
{
	uint32_t val32;
	int model;

	model = ddi_model_convert_from(flags & FMODELS);

	if (model == DDI_MODEL_ILP32) {
		/* 32-bit caller: copy in a 32-bit value and widen it. */
		if (ddi_copyin((void *)arg, &val32, sizeof (val32),
		    flags) < 0) {
			return -1;
		}
		*dst = val32;
		return 0;
	}

	if (model == DDI_MODEL_NONE) {
		/* Native caller: copy the value in directly. */
		if (ddi_copyin((void *)arg, dst, sizeof (*dst), flags) < 0) {
			return -1;
		}
		return 0;
	}

	/* Unknown data model. */
	return -1;
}
/*
 * Copy in an sckm_ioctl_getreq_t from userland, converting from the
 * ILP32 layout when the caller is a 32-bit process.
 *
 * Returns 0 on success or EFAULT if the copyin fails.
 */
static int
sckm_copyin_ioctl_getreq(intptr_t userarg, sckm_ioctl_getreq_t *driverarg,
    int flag)
{
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		sckm_ioctl_getreq32_t driverarg32;
		if (ddi_copyin((caddr_t)userarg, &driverarg32,
		    sizeof (sckm_ioctl_getreq32_t), flag)) {
			return (EFAULT);
		}
		driverarg->transid = driverarg32.transid;
		driverarg->type = driverarg32.type;
		driverarg->buf = (caddr_t)(uintptr_t)driverarg32.buf;
		driverarg->buf_len = driverarg32.buf_len;
		break;
	}
	case DDI_MODEL_NONE: {
		/*
		 * BUG FIX: the original passed &driverarg (the address of
		 * the local pointer variable) to ddi_copyin, which would
		 * overwrite the pointer itself instead of filling in the
		 * caller's structure.  Copy into the structure it points at.
		 */
		if (ddi_copyin((caddr_t)userarg, driverarg,
		    sizeof (sckm_ioctl_getreq_t), flag)) {
			return (EFAULT);
		}
		break;
	}
	}
#else /* ! _MULTI_DATAMODEL */
	/* Same fix as above: copy into *driverarg, not into the pointer. */
	if (ddi_copyin((caddr_t)userarg, driverarg,
	    sizeof (sckm_ioctl_getreq_t), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */
	return (0);
}
/*
 * Copy an fcoeio_t request and its input/abort payloads in from userland.
 *
 * On success, *fcoeio holds the request header, *ibuf and *abuf hold the
 * copied-in input/abort buffers (when their lengths are non-zero), and
 * *obuf is a zeroed buffer for the output payload.  On failure everything
 * allocated so far is released and EFAULT is returned.
 */
static int
fcoe_copyin_iocdata(intptr_t data, int mode, fcoeio_t **fcoeio,
    void **ibuf, void **abuf, void **obuf)
{
	fcoeio_t	*io;
	int		status = 0;

	*ibuf = NULL;
	*abuf = NULL;
	*obuf = NULL;

	io = kmem_zalloc(sizeof (fcoeio_t), KM_SLEEP);
	*fcoeio = io;

	if (ddi_copyin((void *)data, io, sizeof (fcoeio_t), mode) != 0) {
		status = EFAULT;
		goto fail;
	}

	/* Reject oversized user-supplied lengths before trusting them. */
	if (io->fcoeio_ilen > FCOEIO_MAX_BUF_LEN ||
	    io->fcoeio_alen > FCOEIO_MAX_BUF_LEN ||
	    io->fcoeio_olen > FCOEIO_MAX_BUF_LEN) {
		status = EFAULT;
		goto fail;
	}

	if (io->fcoeio_ilen != 0) {
		*ibuf = kmem_zalloc(io->fcoeio_ilen, KM_SLEEP);
		if (ddi_copyin((void *)(unsigned long)io->fcoeio_ibuf,
		    *ibuf, io->fcoeio_ilen, mode) != 0) {
			status = EFAULT;
			goto fail;
		}
	}

	if (io->fcoeio_alen != 0) {
		*abuf = kmem_zalloc(io->fcoeio_alen, KM_SLEEP);
		if (ddi_copyin((void *)(unsigned long)io->fcoeio_abuf,
		    *abuf, io->fcoeio_alen, mode) != 0) {
			status = EFAULT;
			goto fail;
		}
	}

	/* Output buffer is only allocated here; it is filled by the caller. */
	if (io->fcoeio_olen != 0) {
		*obuf = kmem_zalloc(io->fcoeio_olen, KM_SLEEP);
	}

	return (status);

fail:
	if (*abuf != NULL) {
		kmem_free(*abuf, io->fcoeio_alen);
		*abuf = NULL;
	}
	if (*ibuf != NULL) {
		kmem_free(*ibuf, io->fcoeio_ilen);
		*ibuf = NULL;
	}
	kmem_free(io, sizeof (fcoeio_t));
	return (status);
}
/*
 * Import a buffer from user-space.  If the caller provides a kernel
 * address, we import to that address.  If not, we kmem_alloc() the space
 * ourselves (and mark it IE_FREE so export_buffer() releases it later).
 */
static int
import_buffer(import_export_t *iep, void *uaddr, void *kaddr, size_t size,
    uint32_t flags)
{
	iep->ie_uaddr = uaddr;
	iep->ie_size = size;
	iep->ie_flags = flags & IE_EXPORT;

	/* Nothing to track: hand back the canonical "null" descriptor. */
	if (size == 0 || uaddr == NULL) {
		*iep = null_ie;
		return (0);
	}

	if (kaddr != NULL) {
		iep->ie_kaddr = kaddr;
		iep->ie_flags &= ~IE_FREE;
	} else {
		iep->ie_kaddr = kmem_alloc(size, KM_SLEEP);
		iep->ie_flags |= IE_FREE;
	}

	if ((flags & IE_IMPORT) != 0 &&
	    ddi_copyin(uaddr, iep->ie_kaddr, size, 0) != 0) {
		/* Copyin failed: release anything we allocated ourselves. */
		if (iep->ie_flags & IE_FREE) {
			kmem_free(iep->ie_kaddr, iep->ie_size);
			iep->ie_kaddr = NULL;
			iep->ie_flags = 0;
		}
		return (-X_EFAULT);
	}

	return (0);
}
/*
 * Copy in a wrphy request and write the given value to the requested
 * PHY register.  Returns EFAULT on copyin failure, EINVAL if the PHY
 * write fails, 0 on success.
 */
static int
hci1394_ioctl_wrphy(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_wrphy_t wrphy;

	ASSERT(soft_state != NULL);
	ASSERT(arg != NULL);
	TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_enter, HCI1394_TNF_HAL_STACK,
	    "");

	if (ddi_copyin(arg, &wrphy, sizeof (hci1394_ioctl_wrphy_t),
	    mode) != 0) {
		TNF_PROBE_0(hci1394_ioctl_wrphy_ci_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	if (hci1394_ohci_phy_write(soft_state->ohci, wrphy.addr,
	    wrphy.data) != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_ioctl_wrphy_pw_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EINVAL);
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit, HCI1394_TNF_HAL_STACK,
	    "");
	return (0);
}
/*
 * Copy in a rdreg request, read the requested OpenHCI register, and copy
 * the result (in rdreg.data) back out to the caller.
 */
static int
hci1394_ioctl_rdreg(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_rdreg_t rdreg;

	ASSERT(soft_state != NULL);
	ASSERT(arg != NULL);
	TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_enter, HCI1394_TNF_HAL_STACK,
	    "");

	if (ddi_copyin(arg, &rdreg, sizeof (hci1394_ioctl_rdreg_t),
	    mode) != 0) {
		TNF_PROBE_0(hci1394_ioctl_rdreg_ci_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	/* Register reads cannot fail; the value lands in rdreg.data. */
	hci1394_ohci_reg_read(soft_state->ohci, rdreg.addr, &rdreg.data);

	if (ddi_copyout(&rdreg, arg, sizeof (hci1394_ioctl_rdreg_t),
	    mode) != 0) {
		TNF_PROBE_0(hci1394_ioctl_rdreg_c0_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit, HCI1394_TNF_HAL_STACK,
	    "");
	return (0);
}
/*
 * iscsi_conn_list_get_copyin -
 *
 * Copy in an iscsi_conn_list_t header from userland and allocate a
 * kernel copy large enough to hold cl_in_cnt connection entries.
 * Returns NULL on copyin failure, interface version mismatch, or an
 * element count so large the size computation would overflow.  The
 * caller owns (and must free) the returned buffer.
 */
iscsi_conn_list_t *
iscsi_ioctl_conn_oid_list_get_copyin(caddr_t arg, int mode)
{
	iscsi_conn_list_t	*cl_tmp;
	iscsi_conn_list_t	*cl = NULL;
	size_t			alloc_len;

	ASSERT(arg != NULL);

	cl_tmp = (iscsi_conn_list_t *)kmem_zalloc(sizeof (*cl_tmp), KM_SLEEP);
	if (ddi_copyin(arg, cl_tmp, sizeof (*cl_tmp), mode) == 0) {
		/*
		 * cl_in_cnt comes straight from userland; guard the
		 * multiplication below against overflow before using it
		 * to size a KM_SLEEP allocation.
		 */
		if ((cl_tmp->cl_vers == ISCSI_INTERFACE_VERSION) &&
		    ((cl_tmp->cl_in_cnt == 0) ||
		    ((size_t)(cl_tmp->cl_in_cnt - 1) <=
		    (SIZE_MAX - sizeof (*cl)) / sizeof (iscsi_if_conn_t)))) {
			alloc_len = sizeof (*cl);
			if (cl_tmp->cl_in_cnt != 0) {
				/* First entry is part of the header. */
				alloc_len += ((cl_tmp->cl_in_cnt - 1) *
				    sizeof (iscsi_if_conn_t));
			}
			cl = (iscsi_conn_list_t *)kmem_zalloc(alloc_len,
			    KM_SLEEP);
			bcopy(cl_tmp, cl, sizeof (*cl_tmp));
		}
	}
	kmem_free(cl_tmp, sizeof (*cl_tmp));
	return (cl);
}
/*
 * Wait (up to 2 minutes) for a link-down event on the named host.
 * Returns 0 once the link is down (or already was), EAGAIN if the wait
 * was interrupted by a signal, EFAULT if the host name cannot be read.
 */
/* ARGSUSED3 */
int
_rdc_link_down(void *arg, int mode, spcs_s_info_t kstatus, int *rvp)
{
	char host[MAX_RDC_HOST_SIZE];
	rdc_link_down_t *syncdp;
	clock_t timeout = RDC_SYNC_EVENT_TIMEOUT * 2;	/* 2 min */
	int rc = 0;

	if (ddi_copyin(arg, host, MAX_RDC_HOST_SIZE, mode))
		return (EFAULT);

	/*
	 * Fix: the raw userland buffer is not guaranteed to contain a
	 * NUL terminator; terminate it before handing it to string-based
	 * lookup code to avoid reading past the end of the array.
	 */
	host[MAX_RDC_HOST_SIZE - 1] = '\0';

	syncdp = rdc_lookup_host(host);

	mutex_enter(&syncdp->syncd_mutex);
	if (!syncdp->link_down) {
		syncdp->waiting = 1;
		if (cv_timedwait_sig(&syncdp->syncd_cv, &syncdp->syncd_mutex,
		    nsc_lbolt() + timeout) == 0) {
			/* Woken by a signal, not a link down event */
			syncdp->waiting = 0;
			rc = EAGAIN;
			spcs_s_add(kstatus, rc);
		}
	}
	mutex_exit(&syncdp->syncd_mutex);

	return (rc);
}
/*
 * Copy cb bytes from the ring-3 address R3PtrSrc into the kernel buffer
 * pvDst.  Returns VINF_SUCCESS on success, VERR_ACCESS_DENIED if the
 * user address range is not readable.
 */
RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
{
    RT_ASSERT_INTS_ON();

    if (RT_LIKELY(ddi_copyin((const char *)R3PtrSrc, pvDst, cb,
                             0 /*flags*/) == 0))
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
}
/*
 * Open the nsc device named in the userland nscioc_open request and bind
 * the resulting nsc_fd_t to this minor device.  Only one fd may be bound
 * per minor (EBUSY otherwise).  For write/exclusive opens the fd is also
 * reserved before being published.
 */
int
_nscopen(dev_t dev, intptr_t arg, int mode, int *rvp)
{
	minor_t mindev = getminor(dev);
	struct nscioc_open *op;
	nsc_fd_t *fd;
	int rc;

	op = nsc_kmem_alloc(sizeof (*op), KM_SLEEP, _nsc_local_mem);
	if (op == NULL) {
		return (ENOMEM);
	}

	if (ddi_copyin((void *)arg, op, sizeof (*op), mode) < 0) {
		nsc_kmem_free(op, sizeof (*op));
		return (EFAULT);
	}

	/* Serialize against other opens of the same minor. */
	mutex_enter(_nsc_minor_slp[mindev]);

	if (_nsc_minor_fd[mindev]) {
		/* This minor already has an fd bound to it. */
		mutex_exit(_nsc_minor_slp[mindev]);
		nsc_kmem_free(op, sizeof (*op));
		return (EBUSY);
	}

	/* Userland path may not be NUL-terminated; force termination. */
	op->path[sizeof (op->path)-1] = 0;

	fd = nsc_open(op->path, (op->flag & NSC_TYPES), 0, 0, &rc);
	if (fd == NULL) {
		mutex_exit(_nsc_minor_slp[mindev]);
		nsc_kmem_free(op, sizeof (*op));
		return (rc);
	}

	/* Merge the caller's requested open mode into ours. */
	mode |= (op->mode - FOPEN);

	if (mode & (FWRITE|FEXCL)) {
		/* Writers get the device reserved up front. */
		if ((rc = nsc_reserve(fd, NSC_PCATCH)) != 0) {
			mutex_exit(_nsc_minor_slp[mindev]);
			(void) nsc_close(fd);
			nsc_kmem_free(op, sizeof (*op));
			return (rc);
		}
	}

	*rvp = 0;
	_nsc_minor_fd[mindev] = fd;

	mutex_exit(_nsc_minor_slp[mindev]);
	nsc_kmem_free(op, sizeof (*op));
	return (0);
}
/*
 * Helper for SMBIOC_DUP_DEV
 * Duplicate state from the FD @arg ("from") onto
 * the FD for this device instance.
 */
int
smb_usr_dup_dev(smb_dev_t *sdp, intptr_t arg, int flags)
{
	smb_dev_t *src_sdp;
	file_t *fp;
	vnode_t *vp;
	dev_t rdev;
	int32_t ufd;
	int err = 0;

	/* This instance must not already be connected. */
	if (sdp->sd_vc != NULL)
		return (EISCONN);

	/* Fetch the user's file descriptor naming the source device. */
	if (ddi_copyin((void *) arg, &ufd, sizeof (ufd), flags))
		return (EFAULT);

	fp = getf(ufd);
	if (fp == NULL)
		return (EBADF);
	/* releasef(ufd) below */

	/* Verify the FD really refers to one of our device minors. */
	vp = fp->f_vnode;
	rdev = vp->v_rdev;
	if (rdev == 0 || rdev == NODEV || getmajor(rdev) != nsmb_major) {
		err = EINVAL;
		goto out;
	}

	src_sdp = ddi_get_soft_state(statep, getminor(rdev));
	if (src_sdp == NULL) {
		err = EINVAL;
		goto out;
	}

	/*
	 * Duplicate VC and share references onto this FD,
	 * taking our own holds.
	 */
	sdp->sd_vc = src_sdp->sd_vc;
	if (sdp->sd_vc != NULL)
		smb_vc_hold(sdp->sd_vc);
	sdp->sd_share = src_sdp->sd_share;
	if (sdp->sd_share != NULL)
		smb_share_hold(sdp->sd_share);
	sdp->sd_level = src_sdp->sd_level;

out:
	releasef(ufd);
	return (err);
}
/*
 * iscsi_ioctl_copyin -
 *
 * Allocate a kernel buffer of the given size and fill it from the
 * userland address.  Returns the buffer on success (caller frees),
 * or NULL if the copyin fails.
 */
void *
iscsi_ioctl_copyin(caddr_t arg, int mode, size_t size)
{
	void *data;

	ASSERT(arg != NULL);
	ASSERT(size != 0);

	data = kmem_alloc(size, KM_SLEEP);
	if (ddi_copyin(arg, data, size, mode) == 0)
		return (data);

	/* Copyin failed: release the buffer and signal failure. */
	kmem_free(data, size);
	return (NULL);
}
/*
 * Copy in a rdvreg request, read the requested vendor-specific register,
 * and copy the result back out to the caller.
 */
static int
hci1394_ioctl_rdvreg(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_rdvreg_t rdvreg;

	ASSERT(soft_state != NULL);
	ASSERT(arg != NULL);
	TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_enter, HCI1394_TNF_HAL_STACK,
	    "");

	if (ddi_copyin(arg, &rdvreg, sizeof (hci1394_ioctl_rdvreg_t),
	    mode) != 0) {
		TNF_PROBE_0(hci1394_ioctl_rdvreg_ci_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	if (hci1394_vendor_reg_read(soft_state->vendor, rdvreg.regset,
	    rdvreg.addr, &rdvreg.data) != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_ioctl_rdvreg_vrr_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EINVAL);
	}

	if (ddi_copyout(&rdvreg, arg, sizeof (hci1394_ioctl_rdvreg_t),
	    mode) != 0) {
		TNF_PROBE_0(hci1394_ioctl_rdvreg_co_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK,
	    "");
	return (0);
}
/*
 * Given a pointer (arg) to a "struct lifreq" (potentially in user space),
 * determine the linkid for the interface name stored in that structure.
 * A local buffer is used for the name so that a trailing \0 can safely
 * be guaranteed before the lookup.
 */
static int
pfp_lifreq_getlinkid(intptr_t arg, struct lifreq *lifreqp,
    datalink_id_t *linkidp)
{
	char ifname[LIFNAMSIZ + 1];
	int rc;

	if (ddi_copyin((void *)arg, lifreqp, sizeof (*lifreqp), 0) != 0)
		return (EFAULT);

	/* strlcpy guarantees NUL termination of the local copy. */
	(void) strlcpy(ifname, lifreqp->lifr_name, sizeof (ifname));

	/* Try the management layer first, then fall back to the macname. */
	rc = dls_mgmt_get_linkid(ifname, linkidp);
	if (rc != 0)
		rc = dls_devnet_macname2linkid(ifname, linkidp);

	return (rc);
}
/*
 * Copy in an emul64_tgt_range_t from userland and look up the target it
 * names.  Returns 0 with *tgtp set, EFAULT on copyin failure, or ENXIO
 * when no such target exists.
 */
static int
emul64_get_tgtrange(struct emul64 *emul64, intptr_t arg, emul64_tgt_t **tgtp,
    emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);

	if (*tgtp != NULL)
		return (0);

	cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
	    tgtr->emul64_target, tgtr->emul64_lun,
	    ddi_get_instance(emul64->emul64_dip));
	return (ENXIO);
}
/*
 * Handle vsession-related ioctls.
 *
 * Returns the sub-handler's result, or -1 for copyin failures and
 * unrecognized commands.
 */
int
fio_vsession_ioctl(fio_devstate_t * rsp, int cmd, intptr_t arg)
{
  struct ink_cmd_msg msg;

  switch (cmd) {
  case INKFIO_VSESSION_CREATE:
    return fio_vsession_create(rsp);

  case INKFIO_VSESSION_DESTROY:
    return fio_vsession_destroy(rsp, (int) arg);

  case INKFIO_VSESSION_CMD:
    if (ddi_copyin((char *) arg, &msg, sizeof(struct ink_cmd_msg), 0)) {
      /*
       * Fix: print the user pointer with %p instead of casting it to
       * int for 0x%x, which truncated the address on 64-bit kernels
       * and mismatched the format specifier.
       */
      cmn_err(CE_WARN, "fio_vsession_ioctl: Invalid userspace pointer %p.\n",
              (void *) arg);
      return -1;
    }
    return fio_vsession_cmd(rsp, &msg);
  }

  cmn_err(CE_WARN, "fio: Unrecognized vsession ioctl 0x%x\n", cmd);
  return -1;
}
/*
 * cpuid device ioctl handler.  CPUID_GET_HWCAP: copy in a
 * cpuid_get_hwcap struct (model-aware via the STRUCT_* macros), look up
 * the hardware-capability word for the requested architecture name, and
 * copy the updated struct back out.  All other commands return ENOTTY.
 */
/*ARGSUSED*/
static int
cpuid_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval)
{
	char areq[16];
	void *ustr;

	switch (cmd) {
	case CPUID_GET_HWCAP: {
		STRUCT_DECL(cpuid_get_hwcap, h);

		STRUCT_INIT(h, mode);
		if (ddi_copyin((void *)arg,
		    STRUCT_BUF(h), STRUCT_SIZE(h), mode))
			return (EFAULT);
		/* Pull in the user's architecture-name string, if any. */
		if ((ustr = STRUCT_FGETP(h, cgh_archname)) != NULL &&
		    copyinstr(ustr, areq, sizeof (areq), NULL) != 0)
			return (EFAULT);
		/* Defensive: guarantee termination before strcmp(). */
		areq[sizeof (areq) - 1] = '\0';

		if (strcmp(areq, architecture) == 0)
			STRUCT_FSET(h, cgh_hwcap, auxv_hwcap);
#if defined(_SYSCALL32_IMPL)
		else if (strcmp(areq, architecture_32) == 0)
			STRUCT_FSET(h, cgh_hwcap, auxv_hwcap32);
#endif
		else
			/* Unknown architecture name: report no capabilities. */
			STRUCT_FSET(h, cgh_hwcap, 0);

		if (ddi_copyout(STRUCT_BUF(h),
		    (void *)arg, STRUCT_SIZE(h), mode))
			return (EFAULT);
		return (0);
	}

	default:
		return (ENOTTY);
	}
}
/*
 * Read in and unpack a user-supplied packed nvlist.  In the kernel the
 * packed bytes are first copied into a temporary buffer; in userland
 * `nvl' is already a directly usable pointer.  On success *nvp holds
 * the unpacked list (caller frees it).
 */
static int
zfs_ioctl_compat_get_nvlist(uint64_t nvl, size_t size, int iflag,
    nvlist_t **nvp)
{
	char *packed;
	nvlist_t *list = NULL;
	int error;

	/* An empty buffer cannot contain a valid packed nvlist. */
	if (size == 0)
		return (EINVAL);

#ifdef _KERNEL
	packed = kmem_alloc(size, KM_SLEEP);
	error = ddi_copyin((void *)(uintptr_t)nvl, packed, size, iflag);
	if (error != 0) {
		kmem_free(packed, size);
		return (error);
	}
#else
	packed = (void *)(uintptr_t)nvl;
#endif

	error = nvlist_unpack(packed, size, &list, 0);

#ifdef _KERNEL
	kmem_free(packed, size);
#endif

	if (error != 0)
		return (error);

	*nvp = list;
	return (0);
}
/*
 * Dispatch console/framebuffer ioctls for the VGA text driver.
 * Returns 0 on success, an errno on failure; unknown commands and
 * userland attempts at kernel-only commands get ENXIO.
 */
static int
do_gfx_ioctl(int cmd, intptr_t data, int mode, struct vgatext_softc *softc)
{
	static char kernel_only[] =
	    "gfxp_vgatext_ioctl: %s is a kernel only ioctl";
	int err;
	int kd_mode;

	switch (cmd) {
	case KDSETMODE:
		/* data is the new KD_* display mode, passed by value. */
		return (vgatext_kdsetmode(softc, (int)data));

	case KDGETMODE:
		kd_mode = softc->mode;
		if (ddi_copyout(&kd_mode, (void *)data, sizeof (int), mode))
			return (EFAULT);
		break;

	case VIS_DEVINIT:
		/* Only in-kernel callers (terminal emulator) may do this. */
		if (!(mode & FKIOCTL)) {
			cmn_err(CE_CONT, kernel_only, "VIS_DEVINIT");
			return (ENXIO);
		}
		err = vgatext_devinit(softc, (struct vis_devinit *)data);
		if (err != 0) {
			cmn_err(CE_WARN, "gfxp_vgatext_ioctl: could not"
			    " initialize console");
			return (err);
		}
		break;

	case VIS_CONSCOPY:	/* move */
	{
		struct vis_conscopy pma;

		if (ddi_copyin((void *)data, &pma,
		    sizeof (struct vis_conscopy), mode))
			return (EFAULT);
		vgatext_cons_copy(softc, &pma);
		break;
	}

	case VIS_CONSDISPLAY:	/* display */
	{
		struct vis_consdisplay display_request;

		if (ddi_copyin((void *)data, &display_request,
		    sizeof (display_request), mode))
			return (EFAULT);
		vgatext_cons_display(softc, &display_request);
		break;
	}

	case VIS_CONSCURSOR:
	{
		struct vis_conscursor cursor_request;

		if (ddi_copyin((void *)data, &cursor_request,
		    sizeof (cursor_request), mode))
			return (EFAULT);
		vgatext_cons_cursor(softc, &cursor_request);
		/* A "get" fills in the struct; copy the result back out. */
		if (cursor_request.action == VIS_GET_CURSOR &&
		    ddi_copyout(&cursor_request, (void *)data,
		    sizeof (cursor_request), mode))
			return (EFAULT);
		break;
	}

	case VIS_GETCMAP:
	case VIS_PUTCMAP:
	case FBIOPUTCMAP:
	case FBIOGETCMAP:
		/*
		 * At the moment, text mode is not considered to have
		 * a color map.
		 */
		return (EINVAL);

	case FBIOGATTR:
		if (copyout(&vgatext_attr, (void *)data,
		    sizeof (struct fbgattr)))
			return (EFAULT);
		break;

	case FBIOGTYPE:
		if (copyout(&vgatext_attr.fbtype, (void *)data,
		    sizeof (struct fbtype)))
			return (EFAULT);
		break;

	default:
		return (ENXIO);
	}
	return (0);
}
/*
 * pppt driver ioctl entry point.  Privileged callers only.
 *
 * PPPT_MESSAGE: copy in a user-supplied message buffer and hand it to
 * stmf_ic_rx_msg().  PPPT_INSTALL_DOOR: install the (single) upcall
 * door for the service.
 */
/* ARGSUSED */
static int
pppt_drv_ioctl(dev_t drv, int cmd, intptr_t argp, int flag, cred_t *cred,
    int *retval)
{
	int rc;
	void *buf;
	size_t buf_size;
	pppt_iocdata_t iocd;
	door_handle_t new_handle;

	if (drv_priv(cred) != 0) {
		return (EPERM);
	}

	rc = ddi_copyin((void *)argp, &iocd, sizeof (iocd), flag);
	if (rc)
		return (EFAULT);

	if (iocd.pppt_version != PPPT_VERSION_1)
		return (EINVAL);

	switch (cmd) {
	case PPPT_MESSAGE:
		buf_size = (size_t)iocd.pppt_buf_size;

		/*
		 * Fix for the old "XXX limit buf_size?" note: the size
		 * comes straight from userland, so reject empty requests
		 * and cap the size (16MB) before committing a KM_SLEEP
		 * allocation to it.  (KM_SLEEP never returns NULL, so the
		 * old post-alloc NULL check was dead code.)
		 */
		if (buf_size == 0 || buf_size > (16 * 1024 * 1024))
			return (EINVAL);

		buf = kmem_alloc(buf_size, KM_SLEEP);

		rc = ddi_copyin((void *)(unsigned long)iocd.pppt_buf,
		    buf, buf_size, flag);
		if (rc) {
			kmem_free(buf, buf_size);
			return (EFAULT);
		}

		stmf_ic_rx_msg(buf, buf_size);

		kmem_free(buf, buf_size);
		break;
	case PPPT_INSTALL_DOOR:
		new_handle = door_ki_lookup((int)iocd.pppt_door_fd);
		if (new_handle == NULL)
			return (EINVAL);

		mutex_enter(&pppt_global.global_door_lock);
		ASSERT(pppt_global.global_svc_state == PSS_ENABLED);
		if (pppt_global.global_door != NULL) {
			/*
			 * There can only be one door installed
			 */
			mutex_exit(&pppt_global.global_door_lock);
			door_ki_rele(new_handle);
			return (EBUSY);
		}
		pppt_global.global_door = new_handle;
		mutex_exit(&pppt_global.global_door_lock);
		break;
	}

	return (rc);
}
/*
 * Command dispatcher for the nskernd userland daemon <-> kernel protocol.
 * Copies a struct nskernd in from userland, acts on udata->command, and
 * (for NSKERND_WAIT) copies the answer back out.  Returns 0 or an errno;
 * *rvalp carries the NSKERND_START result.
 */
int
nskernd_command(intptr_t arg, int mode, int *rvalp)
{
	struct nskernd *udata = NULL;
	uint64_t arg1, arg2;
	int rc;

	*rvalp = 0;
	rc = 0;

	udata = kmem_alloc(sizeof (*udata), KM_SLEEP);
	if (ddi_copyin((void *)arg, udata, sizeof (*udata), mode) < 0) {
		kmem_free(udata, sizeof (*udata));
		return (EFAULT);
	}

	switch (udata->command) {
	case NSKERND_START:		/* User program start */
		*rvalp = nskernd_start(udata->data1);
		break;

	case NSKERND_STOP:		/* User program requesting stop */
		mutex_enter(&nskernd_lock);
		nskernd_cleanup();
		mutex_exit(&nskernd_lock);
		break;

	case NSKERND_WAIT:
		/*
		 * Park this daemon thread until the kernel has work for
		 * it (cv_signal on nskernd_u_cv) or a signal arrives.
		 */
		mutex_enter(&nskernd_lock);
		bcopy(udata, &nskernd_kdata, sizeof (*udata));

		if (nskernd_ask > 0)
			cv_signal(&nskernd_ask_cv);

		nskernd_u_wait++;

		if (cv_wait_sig(&nskernd_u_cv, &nskernd_lock) != 0) {
			/*
			 * woken by cv_signal() or cv_broadcast()
			 */
			bcopy(&nskernd_kdata, udata, sizeof (*udata));
		} else {
			/*
			 * signal - the user process has blocked all
			 * signals except for SIGTERM and the
			 * uncatchables, so the process is about to die
			 * and we need to clean up.
			 */
			udata->command = NSKERND_STOP;
			udata->data1 = (uint64_t)1;	/* cleanup done */
			nskernd_cleanup();
		}

		nskernd_u_wait--;
		mutex_exit(&nskernd_lock);

		/* Hand the work item (or STOP request) back to the daemon. */
		if (ddi_copyout(udata, (void *)arg,
		    sizeof (*udata), mode) < 0) {
			rc = EFAULT;
			break;
		}
		break;

	case NSKERND_NEWLWP:
		/* save kmem by freeing the udata structure */
		arg1 = udata->data1;
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
		nsc_runlwp(arg1);
		break;

	case NSKERND_LOCK:
		/* save kmem by freeing the udata structure */
		arg1 = udata->data1;
		arg2 = udata->data2;
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
		nsc_lockchild(arg1, arg2);
		break;

	default:
		cmn_err(CE_WARN, "nskernd: unknown command %d",
		    udata->command);
		rc = EINVAL;
		break;
	}

	if (udata != NULL) {
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
	}

	return (rc);
}
/*
 * ntwdt application-watchdog ioctl handler (LOM-compatible interface).
 *
 * LOMIOCDOGSTATE: report current watchdog state.  LOMIOCDOGCTL: enable/
 * disable the watchdog (and its reset action).  LOMIOCDOGTIME: set the
 * timeout.  LOMIOCDOGPAT: re-arm ("pat") a running watchdog.  All state
 * is protected by ntwdt_runstate_mutex; the `end' label exits with that
 * mutex held paths' common unlock.
 */
/*ARGSUSED*/
static int
ntwdt_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	int instance = getminor(dev);
	int retval = 0;
	ntwdt_state_t *ntwdt_ptr = NULL;
	ntwdt_runstate_t *ntwdt_state;
	lom_dogstate_t lom_dogstate;
	lom_dogctl_t lom_dogctl;
	uint32_t lom_dogtime;

	if ((ntwdt_ptr = getstate(instance)) == NULL) {
		return (ENXIO);
	}
	ntwdt_state = ntwdt_ptr->ntwdt_run_state;

	switch (cmd) {
	case LOMIOCDOGSTATE:
		/* Snapshot the current state under the lock, then copy out. */
		mutex_enter(&ntwdt_state->ntwdt_runstate_mutex);
		lom_dogstate.reset_enable = ntwdt_state->ntwdt_reset_enabled;
		lom_dogstate.dog_enable = ntwdt_state->ntwdt_watchdog_enabled;
		lom_dogstate.dog_timeout =
		    ntwdt_state->ntwdt_watchdog_timeout;
		mutex_exit(&ntwdt_state->ntwdt_runstate_mutex);

		if (ddi_copyout((caddr_t)&lom_dogstate, (caddr_t)arg,
		    sizeof (lom_dogstate_t), mode) != 0) {
			retval = EFAULT;
		}
		break;

	case LOMIOCDOGCTL:
		if (ddi_copyin((caddr_t)arg, (caddr_t)&lom_dogctl,
		    sizeof (lom_dogctl_t), mode) != 0) {
			retval = EFAULT;
			break;
		}

		NTWDT_DBG(NTWDT_DBG_IOCTL, ("reset_enable: %d, and "
		    "dog_enable: %d, watchdog_timeout %d",
		    lom_dogctl.reset_enable, lom_dogctl.dog_enable,
		    ntwdt_state->ntwdt_watchdog_timeout));

		/*
		 * ignore request to enable reset while disabling watchdog.
		 */
		if (!lom_dogctl.dog_enable && lom_dogctl.reset_enable) {
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("invalid combination of "
			    "reset_enable: %d, and dog_enable: %d",
			    lom_dogctl.reset_enable, lom_dogctl.dog_enable));
			retval = EINVAL;
			break;
		}

		mutex_enter(&ntwdt_state->ntwdt_runstate_mutex);

		if (ntwdt_state->ntwdt_watchdog_timeout == 0) {
			/*
			 * the LOMIOCDOGTIME has never been used to setup
			 * a valid timeout.
			 */
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("timeout has not been set"
			    "watchdog_timeout: %d",
			    ntwdt_state->ntwdt_watchdog_timeout));
			retval = EINVAL;
			goto end;
		}

		/*
		 * Store the user specified state in the softstate.
		 */
		ntwdt_state->ntwdt_reset_enabled = lom_dogctl.reset_enable;
		ntwdt_state->ntwdt_watchdog_enabled = lom_dogctl.dog_enable;

		if (ntwdt_state->ntwdt_watchdog_enabled != 0) {
			/*
			 * The user wants to enable the watchdog.
			 * Arm the watchdog and start the cyclic.
			 */
			ntwdt_arm_watchdog(ntwdt_state);

			if (ntwdt_state->ntwdt_timer_running == 0) {
				ntwdt_start_timer(ntwdt_ptr);
			}

			NTWDT_DBG(NTWDT_DBG_IOCTL, ("AWDT is enabled"));
		} else {
			/*
			 * The user wants to disable the watchdog.
			 */
			if (ntwdt_state->ntwdt_timer_running != 0) {
				ntwdt_stop_timer(ntwdt_ptr);
			}
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("AWDT is disabled"));
		}

		mutex_exit(&ntwdt_state->ntwdt_runstate_mutex);
		break;

	case LOMIOCDOGTIME:
		if (ddi_copyin((caddr_t)arg, (caddr_t)&lom_dogtime,
		    sizeof (uint32_t), mode) != 0) {
			retval = EFAULT;
			break;
		}

		NTWDT_DBG(NTWDT_DBG_IOCTL, ("user set timeout: %d",
		    lom_dogtime));

		/*
		 * Ensure specified timeout is valid.
		 */
		if ((lom_dogtime == 0) ||
		    (lom_dogtime > (uint32_t)NTWDT_MAX_TIMEOUT)) {
			retval = EINVAL;
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("user set invalid "
			    "timeout: %d", (int)TICK_TO_MSEC(lom_dogtime)));
			break;
		}

		mutex_enter(&ntwdt_state->ntwdt_runstate_mutex);

		ntwdt_state->ntwdt_watchdog_timeout = lom_dogtime;

		/*
		 * If awdt is currently running, re-arm it with the
		 * newly-specified timeout value.
		 */
		if (ntwdt_state->ntwdt_timer_running != 0) {
			ntwdt_arm_watchdog(ntwdt_state);
		}
		mutex_exit(&ntwdt_state->ntwdt_runstate_mutex);
		break;

	case LOMIOCDOGPAT:
		/*
		 * Allow user to pat the watchdog timer.
		 */
		NTWDT_DBG(NTWDT_DBG_IOCTL, ("DOGPAT is invoked"));
		mutex_enter(&ntwdt_state->ntwdt_runstate_mutex);

		/*
		 * If awdt is not enabled or underlying cyclic is not
		 * running, exit.
		 */
		if (!(ntwdt_state->ntwdt_watchdog_enabled &&
		    ntwdt_state->ntwdt_timer_running)) {
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("PAT: AWDT not enabled"));
			goto end;
		}

		if (ntwdt_state->ntwdt_watchdog_expired == 0) {
			/*
			 * re-arm the awdt.
			 */
			ntwdt_arm_watchdog(ntwdt_state);
			NTWDT_DBG(NTWDT_DBG_IOCTL, ("AWDT patted, "
			    "remainning seconds: %d",
			    ntwdt_state->ntwdt_time_remaining));
		}
		/* An expired watchdog cannot be patted; just drop the lock. */
		mutex_exit(&ntwdt_state->ntwdt_runstate_mutex);
		break;

	default:
		retval = EINVAL;
		break;
	}

	return (retval);

end:
	/* Common exit for paths that still hold the runstate mutex. */
	mutex_exit(&ntwdt_state->ntwdt_runstate_mutex);
	return (retval);
}
/*
 * Handle an error-injection ioctl for the emul64 pseudo HBA.
 *
 * Copies in an emul64_error_inj_data header (optionally followed by
 * eccd_sns_dlen bytes of sense data) and programs the named target so
 * subsequent I/O to it returns the injected status/sense until disabled.
 */
int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request. If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;

	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				/*
				 * BUG FIX: on copyin failure the original
				 * left injection enabled with an allocated
				 * but uninitialized sense buffer.  Release
				 * the buffer and disable injection so the
				 * target is not left in a garbage state.
				 */
				kmem_free(tgt->emul64_einj_sense_data,
				    tgt->emul64_einj_sense_length);
				tgt->emul64_einj_sense_data = NULL;
				tgt->emul64_einj_sense_length = 0;
				tgt->emul64_einj_state = ERR_INJ_DISABLE;
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}
/**
 * Worker for VBoxSupDrvIOCtl that takes the slow IOCtl functions.
 *
 * @returns Solaris errno.
 *
 * @param   pSession    The session.
 * @param   iCmd        The IOCtl command.
 * @param   Mode        Information bitfield (for specifying ownership of data)
 * @param   iArg        User space address of the request buffer.
 */
static int VBoxDrvSolarisIOCtlSlow(PSUPDRVSESSION pSession, int iCmd, int Mode, intptr_t iArg)
{
    int         rc;
    uint32_t    cbBuf = 0;
    union
    {
        SUPREQHDR   Hdr;
        uint8_t     abBuf[64];
    }           StackBuf;
    PSUPREQHDR  pHdr;

    /*
     * Read the header.
     */
    if (RT_UNLIKELY(IOCPARM_LEN(iCmd) != sizeof(StackBuf.Hdr)))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: iCmd=%#x len %d expected %d\n", iCmd, IOCPARM_LEN(iCmd), sizeof(StackBuf.Hdr)));
        return EINVAL;
    }
    rc = ddi_copyin((void *)iArg, &StackBuf.Hdr, sizeof(StackBuf.Hdr), Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: ddi_copyin(,%#lx,) failed; iCmd=%#x. rc=%d\n", iArg, iCmd, rc));
        return EFAULT;
    }
    if (RT_UNLIKELY((StackBuf.Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: bad header magic %#x; iCmd=%#x\n", StackBuf.Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK, iCmd));
        return EINVAL;
    }
    cbBuf = RT_MAX(StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut);
    if (RT_UNLIKELY(   StackBuf.Hdr.cbIn < sizeof(StackBuf.Hdr)
                    || StackBuf.Hdr.cbOut < sizeof(StackBuf.Hdr)
                    || cbBuf > _1M*16))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: max(%#x,%#x); iCmd=%#x\n", StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut, iCmd));
        return EINVAL;
    }

    /*
     * Buffer the request.
     *
     * Small requests are served from the stack buffer; larger ones get a
     * temporary heap allocation that is freed before returning.
     */
    if (cbBuf <= sizeof(StackBuf))
        pHdr = &StackBuf.Hdr;
    else
    {
        pHdr = RTMemTmpAlloc(cbBuf);
        if (RT_UNLIKELY(!pHdr))
        {
            LogRel(("VBoxDrvSolarisIOCtlSlow: failed to allocate buffer of %d bytes for iCmd=%#x.\n", cbBuf, iCmd));
            return ENOMEM;
        }
    }
    rc = ddi_copyin((void *)iArg, pHdr, cbBuf, Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: copy_from_user(,%#lx, %#x) failed; iCmd=%#x. rc=%d\n", iArg, cbBuf, iCmd, rc));
        /*
         * BUG FIX: the buffer was allocated with RTMemTmpAlloc, so it must
         * be released with RTMemTmpFree (the original used RTMemFree here,
         * mismatching the allocator used above and on the other exit path).
         */
        if (pHdr != &StackBuf.Hdr)
            RTMemTmpFree(pHdr);
        return EFAULT;
    }

    /*
     * Process the IOCtl.
     */
    rc = supdrvIOCtl(iCmd, &g_DevExt, pSession, pHdr, cbBuf);

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (RT_LIKELY(!rc))
    {
        uint32_t cbOut = pHdr->cbOut;
        if (RT_UNLIKELY(cbOut > cbBuf))
        {
            LogRel(("VBoxDrvSolarisIOCtlSlow: too much output! %#x > %#x; iCmd=%#x!\n", cbOut, cbBuf, iCmd));
            cbOut = cbBuf;
        }
        rc = ddi_copyout(pHdr, (void *)iArg, cbOut, Mode);
        if (RT_UNLIKELY(rc != 0))
        {
            /* this is really bad */
            LogRel(("VBoxDrvSolarisIOCtlSlow: ddi_copyout(,%p,%d) failed. rc=%d\n", (void *)iArg, cbBuf, rc));
            rc = EFAULT;
        }
    }
    else
        rc = EINVAL;

    if (pHdr != &StackBuf.Hdr)
        RTMemTmpFree(pHdr);
    return rc;
}
/*
 * Perform a XENPF platform hypercall on behalf of a privileged user
 * process.  The top-level xen_platform_op_t is imported into a local
 * copy; any embedded user pointers for the specific sub-command are
 * pinned via import_handle() into sub_ie/sub2_ie, and everything is
 * exported back (with the hypercall result) afterwards.
 */
static int
privcmd_HYPERVISOR_platform_op(xen_platform_op_t *opp)
{
	import_export_t op_ie, sub_ie, sub2_ie;
	xen_platform_op_t op;
	int error;

	if (import_buffer(&op_ie, opp, &op, sizeof (op), IE_IMPEXP) != 0)
		return (-X_EFAULT);

	sub_ie = null_ie;
	sub2_ie = null_ie;

	/*
	 * Check this first because our wrapper will forcibly overwrite it.
	 */
	if (op.interface_version != XENPF_INTERFACE_VERSION) {
		error = -X_EACCES;
		export_buffer(&op_ie, &error);
		return (error);
	}

	/*
	 * Now handle any platform ops with embedded pointers elsewhere
	 * in the user address space that also need to be tacked down
	 * while the hypervisor futzes with them.
	 */
	switch (op.cmd) {
	case XENPF_settime:
	case XENPF_add_memtype:
	case XENPF_del_memtype:
	case XENPF_read_memtype:
	case XENPF_platform_quirk:
	case XENPF_enter_acpi_sleep:
	case XENPF_change_freq:
	case XENPF_panic_init:
		/* No embedded pointers: nothing extra to import. */
		break;

	case XENPF_microcode_update:
		error = import_handle(&sub_ie, &op.u.microcode.data,
		    op.u.microcode.length, IE_IMPORT);
		break;

	case XENPF_getidletime:
		/* Bitmap goes both ways; the idletime array is output-only. */
		error = import_handle(&sub_ie,
		    &op.u.getidletime.cpumap_bitmap,
		    op.u.getidletime.cpumap_nr_cpus, IE_IMPEXP);
		if (error != 0)
			break;

		error = import_handle(&sub2_ie, &op.u.getidletime.idletime,
		    op.u.getidletime.cpumap_nr_cpus * sizeof (uint64_t),
		    IE_EXPORT);
		break;

	case XENPF_set_processor_pminfo: {
		size_t s;

		switch (op.u.set_pminfo.type) {
		case XEN_PM_PX:
			s = op.u.set_pminfo.u.perf.state_count *
			    sizeof (xen_processor_px_t);
			if (op.u.set_pminfo.u.perf.flags & XEN_PX_PSS) {
				error = import_handle(&sub_ie,
				    &op.u.set_pminfo.u.perf.states, s,
				    IE_IMPORT);
			}
			break;
		case XEN_PM_CX:
			s = op.u.set_pminfo.u.power.count *
			    sizeof (xen_processor_cx_t);
			error = import_handle(&sub_ie,
			    &op.u.set_pminfo.u.power.states, s, IE_IMPORT);
			break;
		case XEN_PM_TX:
			break;
		default:
			error = -X_EINVAL;
			break;
		}
		break;
	}

	case XENPF_firmware_info: {
		uint16_t len;
		void *uaddr;

		switch (op.u.firmware_info.type) {
		case XEN_FW_DISK_INFO:
			/*
			 * Ugh.. another hokey interface.  The first 16 bits
			 * of the buffer are also used as the (input) length.
			 */
			uaddr = uaddr_from_handle(
			    &op.u.firmware_info.u.disk_info.edd_params);
			error = ddi_copyin(uaddr, &len, sizeof (len), 0);
			if (error != 0)
				break;
			error = import_handle(&sub_ie,
			    &op.u.firmware_info.u.disk_info.edd_params, len,
			    IE_IMPEXP);
			break;
		case XEN_FW_VBEDDC_INFO:
			/* EDID blocks are a fixed 128 bytes, output only. */
			error = import_handle(&sub_ie,
			    &op.u.firmware_info.u.vbeddc_info.edid, 128,
			    IE_EXPORT);
			break;
		case XEN_FW_DISK_MBR_SIGNATURE:
		default:
			break;
		}
		break;
	}

	default:
		/* FIXME: see this with non-existed ID 38 ???? */
#ifdef DEBUG
		printf("unrecognized HYPERVISOR_platform_op %d pid %d\n",
		    op.cmd, curthread->t_procp->p_pid);
#endif
		/*
		 * op_ie imported into the local `op' (no IE_FREE), so
		 * returning without export_buffer() leaks nothing.
		 */
		return (-X_EINVAL);
	}

	if (error == 0)
		error = HYPERVISOR_platform_op(&op);

	/* Push results (and any imported sub-buffers) back to userland. */
	export_buffer(&op_ie, &error);
	export_buffer(&sub_ie, &error);
	export_buffer(&sub2_ie, &error);

	return (error);
}
/*
 * Dispatch a privileged hypercall request from userland.  The
 * privcmd_hypercall_t names the hypercall (hc->op) and carries up to
 * five raw arguments; known hypercalls are routed through per-op
 * wrappers that pin embedded user pointers, unknown ones are passed
 * straight through only when allow_all_hypercalls is set.  Positive
 * results are returned via *rval; negative Xen errors are translated
 * to errnos.
 */
/*ARGSUSED*/
int
do_privcmd_hypercall(void *uarg, int mode, cred_t *cr, int *rval)
{
	privcmd_hypercall_t __hc, *hc = &__hc;
	int error;

	if (ddi_copyin(uarg, hc, sizeof (*hc), mode))
		return (EFAULT);

	switch (hc->op) {
	case __HYPERVISOR_mmu_update:
		error = privcmd_HYPERVISOR_mmu_update(
		    (mmu_update_t *)hc->arg[0], (int)hc->arg[1],
		    (int *)hc->arg[2], (domid_t)hc->arg[3]);
		break;
	case __HYPERVISOR_domctl:
		error = privcmd_HYPERVISOR_domctl(
		    (xen_domctl_t *)hc->arg[0]);
		break;
	case __HYPERVISOR_sysctl:
		error = privcmd_HYPERVISOR_sysctl(
		    (xen_sysctl_t *)hc->arg[0]);
		break;
	case __HYPERVISOR_platform_op:
		error = privcmd_HYPERVISOR_platform_op(
		    (xen_platform_op_t *)hc->arg[0]);
		break;
	case __HYPERVISOR_memory_op:
		error = privcmd_HYPERVISOR_memory_op(
		    (int)hc->arg[0], (void *)hc->arg[1]);
		break;
	case __HYPERVISOR_event_channel_op:
		error = privcmd_HYPERVISOR_event_channel_op(
		    (int)hc->arg[0], (void *)hc->arg[1]);
		break;
	case __HYPERVISOR_xen_version:
		error = privcmd_HYPERVISOR_xen_version(
		    (int)hc->arg[0], (void *)hc->arg[1]);
		break;
	case __HYPERVISOR_mmuext_op:
		error = privcmd_HYPERVISOR_mmuext_op(
		    (struct mmuext_op *)hc->arg[0], (int)hc->arg[1],
		    (uint_t *)hc->arg[2], (domid_t)hc->arg[3]);
		break;
	case __HYPERVISOR_xsm_op:
		error = privcmd_HYPERVISOR_xsm_op((void *)hc->arg[0]);
		break;
	case __HYPERVISOR_hvm_op:
		error = privcmd_HYPERVISOR_hvm_op(
		    (int)hc->arg[0], (void *)hc->arg[1]);
		break;
	case __HYPERVISOR_sched_op:
		error = privcmd_HYPERVISOR_sched_op(
		    (int)hc->arg[0], (void *)hc->arg[1]);
		break;
	default:
		/* Unwrapped hypercalls only when explicitly allowed. */
		if (allow_all_hypercalls)
			error = __hypercall5(hc->op, hc->arg[0], hc->arg[1],
			    hc->arg[2], hc->arg[3], hc->arg[4]);
		else {
#ifdef DEBUG
			printf("unrecognized hypercall %ld\n", hc->op);
#endif
			error = -X_EPERM;
		}
		break;
	}

	if (error > 0) {
		/* Positive hypercall results are data, not errors. */
		*rval = error;
		error = 0;
	} else if (error != 0)
		error = xen_xlate_errcode(error);

	return (error);
}
/*
 * tsalarm ioctl handler.  LOMIOCALSTATE[_OLD] reads the state of one
 * alarm (via the RMC) back into the user's ts_aldata_t; LOMIOCALCTL[_OLD]
 * sets an alarm on or off.  All work is serialized on the softc mutex.
 */
/* ARGSUSED */
static int
tsalarm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	int inst = getminor(dev);
	struct tsalarm_softc *softc;
	int retval = 0;
	ts_aldata_t ts_alinfo;
	int alarm_type, alarm_state = 0;

	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

	switch (cmd) {
	case LOMIOCALSTATE:
	case LOMIOCALSTATE_OLD:
		{
			if (ddi_copyin((caddr_t)arg, (caddr_t)&ts_alinfo,
			    sizeof (ts_aldata_t), mode) != 0) {
				retval = EFAULT;
				goto end;
			}

			alarm_type = ts_alinfo.alarm_no;
			/* Valid alarm numbers span ALARM_CRITICAL..ALARM_USER. */
			if ((alarm_type < ALARM_CRITICAL) ||
			    (alarm_type > ALARM_USER)) {
				retval = EINVAL;
				goto end;
			}

			retval = rmclomv_alarm_get(alarm_type, &alarm_state);

			if (retval != 0)
				goto end;

			/* The RMC should only ever report off (0) or on (1). */
			if ((alarm_state != 0) && (alarm_state != 1)) {
				retval = EIO;
				goto end;
			}

			ts_alinfo.alarm_state = alarm_state;
			if (ddi_copyout((caddr_t)&ts_alinfo, (caddr_t)arg,
			    sizeof (ts_aldata_t), mode) != 0) {
				retval = EFAULT;
				goto end;
			}

		}
		break;

	case LOMIOCALCTL:
	case LOMIOCALCTL_OLD:
		{
			if (ddi_copyin((caddr_t)arg, (caddr_t)&ts_alinfo,
			    sizeof (ts_aldata_t), mode) != 0) {
				retval = EFAULT;
				goto end;
			}

			alarm_type = ts_alinfo.alarm_no;
			alarm_state = ts_alinfo.alarm_state;

			if ((alarm_type < ALARM_CRITICAL) ||
			    (alarm_type > ALARM_USER)) {
				retval = EINVAL;
				goto end;
			}
			if ((alarm_state < ALARM_OFF) ||
			    (alarm_state > ALARM_ON)) {
				retval = EINVAL;
				goto end;
			}

			retval = rmclomv_alarm_set(alarm_type, alarm_state);
		}
		break;

	default:
		retval = EINVAL;
		break;
	}

end:
	mutex_exit(&softc->mutex);
	return (retval);
}
/*
 * There are only two special setsockopt's for SOL_SOCKET with PF_PACKET:
 * SO_ATTACH_FILTER and SO_DETACH_FILTER. All other setsockopt requests
 * that are for SOL_SOCKET are passed back to the socket layer for its
 * generic implementation.
 *
 * Both of these setsockopt values are candidates for being handled by the
 * socket layer itself in future, however this requires understanding how
 * they would interact with all other sockets.
 */
static int
pfp_setsocket_sockopt(sock_lower_handle_t handle, int option_name,
    const void *optval, socklen_t optlen)
{
	struct bpf_program prog;
	struct bpf_insn *fcode;
	struct pfpsock *ps;
	int error = 0;
	int size;

	ps = (struct pfpsock *)handle;

	switch (option_name) {
	case SO_ATTACH_FILTER :
#ifdef _LP64
		if (optlen == sizeof (struct bpf_program32)) {
			struct bpf_program32 prog32;

			bcopy(optval, &prog32, sizeof (prog32));
			prog.bf_len = prog32.bf_len;
			prog.bf_insns = (void *)(uint64_t)prog32.bf_insns;
		} else
#endif
		if (optlen == sizeof (struct bpf_program)) {
			bcopy(optval, &prog, sizeof (prog));
		} else {
			/* Simplified from a redundant `else if` re-test. */
			return (EINVAL);
		}

		/*
		 * Fix: bf_len comes straight from userland.  Reject empty
		 * programs and cap the instruction count at BPF_MAXINSNS
		 * so the size multiplication below cannot overflow and an
		 * attacker cannot force an enormous KM_SLEEP allocation.
		 */
		if (prog.bf_len == 0 || prog.bf_len > BPF_MAXINSNS)
			return (EINVAL);

		size = prog.bf_len * sizeof (*prog.bf_insns);
		fcode = kmem_alloc(size, KM_SLEEP);
		if (ddi_copyin(prog.bf_insns, fcode, size, 0) != 0) {
			kmem_free(fcode, size);
			return (EFAULT);
		}

		if (bpf_validate(fcode, (int)prog.bf_len)) {
			rw_enter(&ps->ps_bpflock, RW_WRITER);
			pfp_release_bpf(ps);
			ps->ps_bpf.bf_insns = fcode;
			ps->ps_bpf.bf_len = size;
			rw_exit(&ps->ps_bpflock);

			return (0);
		}

		kmem_free(fcode, size);
		error = EINVAL;
		break;

	case SO_DETACH_FILTER :
		pfp_release_bpf(ps);
		break;

	default :
		/*
		 * If sockfs code receives this error in return from the
		 * getsockopt downcall it handles the option locally, if
		 * it can. This implements SO_RCVBUF, etc.
		 */
		error = ENOPROTOOPT;
		break;
	}

	return (error);
}
/**
 * Device ioctl entry point.
 *
 * Validates the VBOXUSBREQ request wrapper coming from userland (command
 * size, magic, payload size), copies the payload in, dispatches to
 * vboxUSBMonSolarisProcessIOCtl() and copies wrapper + payload back out.
 *
 * @param   Dev     Device number.
 * @param   Cmd     The ioctl command.
 * @param   pArg    User-space address of the VBOXUSBREQ wrapper.
 * @param   Mode    Address-space/data-model flags for ddi_copyin/ddi_copyout.
 * @param   pCred   Caller credentials (unused).
 * @param   pVal    Where the ioctl return value is stored.
 *
 * @returns 0 on success, a Solaris errno on failure.  The VBox status code of
 *          the processed request travels back to userland in ReqWrap.rc.
 */
static int VBoxUSBMonSolarisIOCtl(dev_t Dev, int Cmd, intptr_t pArg, int Mode, cred_t *pCred, int *pVal)
{
    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl Dev=%d Cmd=%d pArg=%p Mode=%d\n", Dev, Cmd, pArg));

    /*
     * Get the session from the soft state item.
     */
    vboxusbmon_state_t *pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, getminor(Dev));
    if (!pState)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: no state data for %d\n", getminor(Dev)));
        return EINVAL;
    }

    /*
     * Read the request wrapper. Though We don't really need wrapper struct. now
     * it's room for the future as Solaris isn't generous regarding the size.
     */
    VBOXUSBREQ ReqWrap;
    if (IOCPARM_LEN(Cmd) != sizeof(ReqWrap))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad request %#x size=%d expected=%d\n", Cmd, IOCPARM_LEN(Cmd), sizeof(ReqWrap)));
        return ENOTTY;
    }

    int rc = ddi_copyin((void *)pArg, &ReqWrap, sizeof(ReqWrap), Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: ddi_copyin failed to read header pArg=%p Cmd=%d. rc=%d.\n", pArg, Cmd, rc));
        return EINVAL;
    }

    if (ReqWrap.u32Magic != VBOXUSBMON_MAGIC)
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad magic %#x; pArg=%p Cmd=%d.\n", ReqWrap.u32Magic, pArg, Cmd));
        return EINVAL;
    }
    /* Reject empty and absurdly large (> 16MB) payloads up front. */
    if (RT_UNLIKELY(   ReqWrap.cbData == 0
                    || ReqWrap.cbData > _1M*16))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad size %#x; pArg=%p Cmd=%d.\n", ReqWrap.cbData, pArg, Cmd));
        return EINVAL;
    }

    /*
     * Read the request.
     */
    void *pvBuf = RTMemTmpAlloc(ReqWrap.cbData);
    if (RT_UNLIKELY(!pvBuf))
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: RTMemTmpAlloc failed to alloc %d bytes.\n", ReqWrap.cbData));
        return ENOMEM;
    }

    rc = ddi_copyin((void *)(uintptr_t)ReqWrap.pvDataR3, pvBuf, ReqWrap.cbData, Mode);
    if (RT_UNLIKELY(rc))
    {
        RTMemTmpFree(pvBuf);
        /* Fixed: this log format string was previously split by a raw line break. */
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyin failed; pvBuf=%p pArg=%p Cmd=%d. rc=%d\n", pvBuf, pArg, Cmd, rc));
        return EFAULT;
    }
    if (RT_UNLIKELY(   ReqWrap.cbData != 0
                    && !VALID_PTR(pvBuf)))
    {
        RTMemTmpFree(pvBuf);
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: pvBuf invalid pointer %p\n", pvBuf));
        return EINVAL;
    }
    Log((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: pid=%d.\n", (int)RTProcSelf()));

    /*
     * Process the IOCtl.
     */
    size_t cbDataReturned;
    rc = vboxUSBMonSolarisProcessIOCtl(Cmd, pState, pvBuf, ReqWrap.cbData, &cbDataReturned);
    ReqWrap.rc = rc;
    rc = 0;

    /* Clamp the returned size so we never copy out more than userland allotted. */
    if (RT_UNLIKELY(cbDataReturned > ReqWrap.cbData))
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: too much output data %d expected %d\n", cbDataReturned, ReqWrap.cbData));
        cbDataReturned = ReqWrap.cbData;
    }

    ReqWrap.cbData = cbDataReturned;

    /*
     * Copy the request back to user space.
     */
    rc = ddi_copyout(&ReqWrap, (void *)pArg, sizeof(ReqWrap), Mode);
    if (RT_LIKELY(!rc))
    {
        /*
         * Copy the payload (if any) back to user space.
         */
        if (cbDataReturned > 0)
        {
            rc = ddi_copyout(pvBuf, (void *)(uintptr_t)ReqWrap.pvDataR3, cbDataReturned, Mode);
            if (RT_UNLIKELY(rc))
            {
                LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyout failed; pvBuf=%p pArg=%p Cmd=%d. rc=%d\n", pvBuf, pArg, Cmd, rc));
                rc = EFAULT;
            }
        }
    }
    else
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyout(1) failed pArg=%p Cmd=%d\n", pArg, Cmd));
        rc = EFAULT;
    }

    *pVal = rc;
    RTMemTmpFree(pvBuf);
    return rc;
}
/*
 * Perform register accesses on the nexus device itself.
 *
 * Copies a pcitool_reg_t request in from userland, validates it (base-addr
 * bank only, 8-byte accesses only, size-aligned offset), performs the
 * physical access via pxtool_phys_access(), and copies the (possibly
 * status-annotated) request back out.  cmd selects read vs. write
 * (PCITOOL_NEXUS_SET_REG == write).
 *
 * Returns 0 on success, EFAULT on copyin/copyout failure, EINVAL on a bad
 * request, or the result of pxtool_phys_access().  prg.status carries a
 * finer-grained PCITOOL_* code back to the caller even on error.
 */
int
pxtool_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
{
	pcitool_reg_t prg;
	size_t size;
	px_t *px_p = DIP_TO_STATE(dip);
	boolean_t is_write = B_FALSE;
	uint32_t rval = 0;

	if (cmd == PCITOOL_NEXUS_SET_REG)
		is_write = B_TRUE;

	DBG(DBG_TOOLS, dip, "pxtool_bus_reg_ops set/get reg\n");

	/* Read data from userland. */
	if (ddi_copyin(arg, &prg, sizeof (pcitool_reg_t), mode) !=
	    DDI_SUCCESS) {
		DBG(DBG_TOOLS, dip, "Error reading arguments\n");
		return (EFAULT);
	}

	/* Requested access width in bytes, decoded from the attributes. */
	size = PCITOOL_ACC_ATTR_SIZE(prg.acc_attr);

	DBG(DBG_TOOLS, dip, "raw bus:0x%x, dev:0x%x, func:0x%x\n",
	    prg.bus_no, prg.dev_no, prg.func_no);
	DBG(DBG_TOOLS, dip, "barnum:0x%x, offset:0x%" PRIx64 ", acc:0x%x\n",
	    prg.barnum, prg.offset, prg.acc_attr);
	DBG(DBG_TOOLS, dip, "data:0x%" PRIx64 ", phys_addr:0x%" PRIx64 "\n",
	    prg.data, prg.phys_addr);

	/*
	 * If bank num == ff, base phys addr passed in from userland.
	 *
	 * Normal bank specification is invalid, as there is no OBP property to
	 * back it up.
	 */
	if (prg.barnum != PCITOOL_BASE) {
		prg.status = PCITOOL_OUT_OF_RANGE;
		rval = EINVAL;
		goto done;
	}

	/* Allow only size of 8-bytes. */
	if (size != sizeof (uint64_t)) {
		prg.status = PCITOOL_INVALID_SIZE;
		rval = EINVAL;
		goto done;
	}

	/* Alignment checking. */
	if (!IS_P2ALIGNED(prg.offset, size)) {
		DBG(DBG_TOOLS, dip, "not aligned.\n");
		prg.status = PCITOOL_NOT_ALIGNED;
		rval = EINVAL;
		goto done;
	}

	prg.phys_addr += prg.offset;

	/*
	 * Only the hypervisor can access nexus registers. As a result, there
	 * can be no error recovery in the OS. If there is an error, the
	 * system will go down, but with a trap type 7f. The OS cannot
	 * intervene with this kind of trap.
	 */

	/* Access device. prg.status is modified. */
	rval = pxtool_phys_access(px_p, prg.phys_addr, &prg.data,
	    PCITOOL_ACC_IS_BIG_ENDIAN(prg.acc_attr), is_write);
done:
	/*
	 * Copy the request (with status and, for reads, data) back to
	 * userland even when the access itself was rejected.
	 */
	prg.drvr_version = PCITOOL_VERSION;
	if (ddi_copyout(&prg, arg, sizeof (pcitool_reg_t), mode) !=
	    DDI_SUCCESS) {
		DBG(DBG_TOOLS, dip, "Copyout failed.\n");
		return (EFAULT);
	}

	return (rval);
}