/*
 * _raw_close - tear down the raw device slot associated with fd.
 *
 * Looks the slot up under _nsc_raw_lock, closes the layered-driver
 * handle and, on success, frees the saved pathname and clears the
 * slot for reuse.
 *
 * Returns 0 on success, EIO for an unknown or unused fd, or the
 * ldi_close() error code (in which case the slot is left intact).
 */
static int
_raw_close(dev_t fd)
{
	raw_dev_t *rdp;
	struct cred *crp;
	int slot;
	int err;

	mutex_enter(&_nsc_raw_lock);

	slot = __raw_get_cd(fd);
	if (slot == -1 || !_nsc_raw_files[slot].in_use) {
		/* not one of our devices, or already closed */
		mutex_exit(&_nsc_raw_lock);
		return (EIO);
	}

	rdp = &_nsc_raw_files[slot];
	crp = ddi_get_cred();

	err = ldi_close(rdp->lh, FREAD|FWRITE, crp);
	if (err != 0) {
		/* close failed: leave the slot untouched */
		mutex_exit(&_nsc_raw_lock);
		return (err);
	}

	/* release the pathname copy and recycle the slot */
	kmem_free(rdp->path, rdp->plen);
	bzero(rdp, sizeof (*rdp));

	mutex_exit(&_nsc_raw_lock);
	return (0);
}
/*
 * check_promisc - log a warning when a DLPI request would enable
 * physical-level promiscuous mode on an interface.
 *
 * fildes    - file descriptor of the stream the request arrived on.
 * promiscon - decoded DLPI primitive to inspect.
 *
 * Only DL_PROMISCON_REQ with level DL_PROMISC_PHYS is reported; all
 * other primitives are ignored.  Nothing is returned: if the file
 * descriptor cannot be resolved the function silently does nothing.
 *
 * Fix: removed the unused `cred' local (the ddi_get_cred() result was
 * never consumed) and the redundant trailing return.
 */
void
check_promisc(int fildes, dl_promiscon_req_t *promiscon)
{
	char *device;
	file_t *file;
	dev_t rdev;

	if (promiscon->dl_primitive != DL_PROMISCON_REQ ||
	    promiscon->dl_level != DL_PROMISC_PHYS)
		return;

	file = getf(fildes);
	if (!file)
		return;

	/* map the stream's device back to a driver name for the log */
	rdev = file->f_vnode->v_rdev;
	device = ddi_major_to_name(getmajor(rdev));

	/* ddi_major_to_name() can return NULL for an unbound major */
	log_msg(CE_WARN, "Promiscuous mode enabled on interface %s",
	    device ? device : "unknown");

	releasef(fildes);
}
int nskern_bsize(struct nscioc_bsize *bsize, int *rvp) { struct cred *cred; raw_dev_t *cdi; int errno = 0; int flag; int cd; *rvp = 0; if (bsize == NULL || rvp == NULL) return (EINVAL); cd = __raw_get_cd(bsize->raw_fd); if (cd == -1 || !_nsc_raw_files[cd].in_use) return (EIO); cdi = &_nsc_raw_files[cd]; cred = ddi_get_cred(); /* * ddi_mmap_get_model() returns the model for this user thread * which is what we want - get_udatamodel() is not public. */ flag = FREAD | FWRITE | ddi_mmap_get_model(); if (bsize->efi == 0) { /* DKIOCINFO */ errno = (*cdi->major->ioctl)(bsize->raw_fd, DKIOCINFO, (intptr_t)bsize->dki_info, flag, cred, rvp); if (errno) { return (errno); } /* DKIOCGVTOC */ errno = (*cdi->major->ioctl)(bsize->raw_fd, DKIOCGVTOC, (intptr_t)bsize->vtoc, flag, cred, rvp); if (errno) { return (errno); } } else { #ifdef DKIOCPARTITION /* do we have an EFI partition table? */ errno = (*cdi->major->ioctl)(bsize->raw_fd, DKIOCPARTITION, (intptr_t)bsize->p64, flag, cred, rvp); if (errno) { return (errno); } #endif } return (0); }
void log_msg(int level, const char *fmt, ...) { va_list ap; char buf[256]; struct psinfo psinfo; va_start(ap, fmt); vsnprintf(buf, 255, fmt, ap); va_end(ap); mutex_enter(&curproc->p_lock); prgetpsinfo(curproc, &psinfo); mutex_exit(&curproc->p_lock); cmn_err(level, "%s (cmd: %s, pid: %d, uid: %d, gid: %d).%s", buf, psinfo.pr_psargs, ddi_get_pid(), ddi_get_cred()->cr_ruid, ddi_get_cred()->cr_rgid, (level == CE_CONT) ? "\n" : ""); }
/*
 * smb_credinit - initialize an smb_cred from a kernel credential.
 *
 * scred - SMB credential wrapper to fill in.
 * cr    - credential to use; optional.  When NULL, the current
 *         thread's credential (ddi_get_cred()) is used.
 *
 * On a labeled (Trusted Extensions) system the credential is
 * duplicated and marked MAC-aware so networking works across labels;
 * otherwise a reference is simply taken.  Either way the stored
 * credential is held and must be released by the matching teardown.
 */
void
smb_credinit(struct smb_cred *scred, cred_t *cr)
{
	cred_t *held;

	/* fall back to the caller's kernel credentials when none given */
	if (cr == NULL)
		cr = ddi_get_cred();

	if (!is_system_labeled()) {
		/* unlabeled system: just take a reference */
		crhold(cr);
		held = cr;
	} else {
		/* Trusted Extensions: use a private, MAC-aware copy */
		held = crdup(cr);
		(void) setpflags(NET_MAC_AWARE, 1, held);
	}

	scred->scr_cred = held;
}
/*
 * _raw_get_bsize - determine the size (in blocks) and partition
 * number of the partition underlying a raw device.
 *
 * dev        - device to query; must already be open in the raw-file
 *              table (_nsc_raw_files).
 * bsizep     - out: partition size; 0 until determined.
 * partitionp - out: partition number; -1 until determined.
 *
 * The driver must advertise DDI_KERNEL_IOCTL, since we call its ioctl
 * entry point directly with FKIOCTL (kernel-resident buffers).  Tries
 * the classic VTOC label first (DKIOCGVTOC); if that fails, falls
 * back to an EFI label query (DKIOCPARTITION) when supported.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * Fix: the VTOC sanity check used `dki_partition > V_NUMPAR', which
 * allowed an index equal to V_NUMPAR into vtoc->v_part[] (an array of
 * V_NUMPAR entries) - an out-of-bounds read.  Now `>= V_NUMPAR'.
 */
static int
_raw_get_bsize(dev_t dev, uint64_t *bsizep, int *partitionp)
{
#ifdef DKIOCPARTITION
	struct partition64 *p64 = NULL;
#endif
	struct dk_cinfo *dki_info = NULL;
	struct dev_ops *ops;
	struct cred *cred;
	struct vtoc *vtoc = NULL;
	dev_info_t *dip;
	raw_dev_t *cdi;
	int rc, cd;
	int flags;
	int rval;

	*partitionp = -1;
	*bsizep = 0;

	if ((cd = __raw_get_cd(dev)) == -1 || !_nsc_raw_files[cd].in_use)
		return (-1);
	cdi = &_nsc_raw_files[cd];

	ops = cdi->major->devops;
	if (ops == NULL) {
		return (-1);
	}

	rc = (*ops->devo_getinfo)(NULL, DDI_INFO_DEVT2DEVINFO,
	    (void *)dev, (void **)&dip);
	if (rc != DDI_SUCCESS || dip == NULL) {
		return (-1);
	}

	/* only safe if the driver handles kernel-space ioctl buffers */
	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, DDI_KERNEL_IOCTL)) {
		return (-1);
	}

	cred = ddi_get_cred();
	flags = FKIOCTL | FREAD | FWRITE | DATAMODEL_NATIVE;

	dki_info = kmem_alloc(sizeof (*dki_info), KM_SLEEP);

	/* DKIOCINFO */
	rc = (*cdi->major->ioctl)(dev, DKIOCINFO, (intptr_t)dki_info,
	    flags, cred, &rval);
	if (rc != 0) {
		goto out;
	}

	/* return partition number */
	*partitionp = (int)dki_info->dki_partition;

	vtoc = kmem_alloc(sizeof (*vtoc), KM_SLEEP);

	/* DKIOCGVTOC */
	rc = (*cdi->major->ioctl)(dev, DKIOCGVTOC, (intptr_t)vtoc,
	    flags, cred, &rval);
	if (rc) {
		/* DKIOCGVTOC failed, but there might be an EFI label */
		rc = -1;
#ifdef DKIOCPARTITION
		/* do we have an EFI partition table? */
		p64 = kmem_alloc(sizeof (*p64), KM_SLEEP);
		p64->p_partno = (uint_t)*partitionp;

		/* DKIOCPARTITION */
		rc = (*cdi->major->ioctl)(dev, DKIOCPARTITION, (intptr_t)p64,
		    flags, cred, &rval);
		if (rc == 0) {
			/* found EFI, return size */
			*bsizep = (uint64_t)p64->p_size;
		} else {
			/* both DKIOCGVTOC and DKIOCPARTITION failed - error */
			rc = -1;
		}
#endif
		goto out;
	}

	/*
	 * Sanity-check the VTOC before trusting the partition index;
	 * valid v_part[] indices are 0 .. V_NUMPAR-1.
	 */
	if ((vtoc->v_sanity != VTOC_SANE) ||
	    (vtoc->v_version != V_VERSION && vtoc->v_version != 0) ||
	    (dki_info->dki_partition >= V_NUMPAR)) {
		rc = -1;
		goto out;
	}

	*bsizep = (uint64_t)vtoc->v_part[(int)dki_info->dki_partition].p_size;
	rc = 0;

out:
	if (dki_info) {
		kmem_free(dki_info, sizeof (*dki_info));
	}
	if (vtoc) {
		kmem_free(vtoc, sizeof (*vtoc));
	}
#ifdef DKIOCPARTITION
	if (p64) {
		kmem_free(p64, sizeof (*p64));
	}
#endif
	return (rc);
}
/*
 * _raw_open - open a raw device by pathname and hand back an opaque
 * cookie (the dev_t) through *cdp.
 *
 * path  - device pathname; copied so the caller may free its copy.
 * flag  - unused (ARGSUSED).
 * cdp   - out: opaque handle, set to the device's dev_t.
 * iodev - unused (ARGSUSED).
 *
 * Requires the nskernd daemon to be running and _nsc_init_raw() to
 * have been called.  If the device is already open (matched by
 * dev_t), the existing slot is reused and 0 is returned immediately.
 * Otherwise a free slot in _nsc_raw_files is claimed under
 * _nsc_raw_lock and the device is opened through the LDI.
 *
 * Returns 0 on success, ENXIO/ENOMEM/EIO on failure, or the LDI
 * open error; on failure the slot is cleared and the path copy freed.
 */
/* ARGSUSED */
static int
_raw_open(char *path, int flag, blind_t *cdp, void *iodev)
{
	struct cred *cred;
	raw_dev_t *cdi = NULL;
	char *spath;
	dev_t rdev;
	int rc, cd, the_cd;
	int plen;
	ldi_ident_t li;

	if (proc_nskernd == NULL) {
		cmn_err(CE_WARN, "nskern: no nskernd daemon running!");
		return (ENXIO);
	}

	if (_nsc_raw_maxdevs == 0) {
		cmn_err(CE_WARN,
		    "nskern: _raw_open() before _nsc_init_raw()!");
		return (ENXIO);
	}

	/* take a private copy of the pathname for the slot */
	plen = strlen(path) + 1;
	spath = kmem_alloc(plen, KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL, so this is dead code */
	if (spath == NULL) {
		cmn_err(CE_WARN,
		    "nskern: unable to alloc memory in _raw_open()");
		return (ENOMEM);
	}

	(void) strcpy(spath, path);

	/*
	 * Lookup the vnode to extract the dev_t info,
	 * then release the vnode.
	 */
	if ((rdev = ldi_get_dev_t_from_path(path)) == 0) {
		kmem_free(spath, plen);
		return (ENXIO);
	}

	/*
	 * See if this device is already opened; while scanning,
	 * remember the first free slot in case it is not.
	 */
	the_cd = -1;

	mutex_enter(&_nsc_raw_lock);

	for (cd = 0, cdi = _nsc_raw_files; cd < fd_hwm; cd++, cdi++) {
		if (rdev == cdi->rdev) {
			the_cd = cd;
			break;
		} else if (the_cd == -1 && !cdi->in_use)
			the_cd = cd;
	}

	/* no match and no free slot below the high-water mark: grow it */
	if (the_cd == -1) {
		if (fd_hwm < _nsc_raw_maxdevs)
			the_cd = fd_hwm++;
		else {
			mutex_exit(&_nsc_raw_lock);
			cmn_err(CE_WARN, "_raw_open: too many open devices");
			kmem_free(spath, plen);
			return (EIO);
		}
	}

	cdi = &_nsc_raw_files[the_cd];

	if (cdi->in_use) {
		/* already set up - just return */
		mutex_exit(&_nsc_raw_lock);
		*cdp = (blind_t)cdi->rdev;
		kmem_free(spath, plen);
		return (0);
	}

	/* initialize the newly claimed slot */
	cdi->partition = -1;
	cdi->size = (uint64_t)0;
	cdi->rdev = rdev;
	cdi->path = spath;
	cdi->plen = plen;

	cred = ddi_get_cred();

	/*
	 * Layered driver
	 *
	 * We use xxx_open_by_dev() since this guarantees that a
	 * specfs vnode is created and used, not a standard filesystem
	 * vnode. This is necessary since in a cluster PXFS will block
	 * vnode operations during switchovers, so we have to use the
	 * underlying specfs vnode not the PXFS vnode.
	 */
	if ((rc = ldi_ident_from_dev(cdi->rdev, &li)) == 0) {
		rc = ldi_open_by_dev(&cdi->rdev, OTYP_BLK,
		    FREAD|FWRITE, cred, &cdi->lh, li);
	}
	if (rc != 0) {
		cdi->lh = NULL;
		goto failed;
	}

	/*
	 * grab the major_t related information
	 */
	cdi->major = _raw_get_maj_info(getmajor(rdev));
	if (cdi->major == NULL) {
		/* Out of memory */
		cmn_err(CE_WARN,
		    "_raw_open: cannot alloc major number structure");
		rc = ENOMEM;
		goto failed;
	}

	*cdp = (blind_t)cdi->rdev;
	cdi->in_use++;
	mutex_exit(&_nsc_raw_lock);

	return (rc);

failed:
	/* undo the partial open: close, clear the slot, free the path */
	if (cdi->lh)
		(void) ldi_close(cdi->lh, FWRITE|FREAD, cred);

	bzero(cdi, sizeof (*cdi));

	mutex_exit(&_nsc_raw_lock);
	kmem_free(spath, plen);

	return (rc);
}
/*
 * notify_ioctl - dispatcher for md event-queue notification ioctls.
 *
 * ioctl_in is copied in as an md_event_ioctl_t; mdn_cmd selects the
 * operation:
 *   EQ_ON         - create and register a new named event queue.
 *   EQ_OFF        - mark a queue for destruction by the reaper.
 *   EQ_GET_WAIT   - block for, then dequeue, the next event.
 *   EQ_GET_NOWAIT - dequeue an event or return EAGAIN.
 *   EQ_PUT        - post an event to the queues.
 *
 * Locking: md_eventq_mx protects the queue list.  For all commands
 * except EQ_ON/EQ_PUT the lock is taken before the queue lookup and
 * each switch arm is responsible for dropping it on every path.
 *
 * Returns 0 on success or an errno (EFAULT, EINVAL, ENOENT, EEXIST,
 * ENOMEM, EINTR, EAGAIN).
 *
 * Fix: the EQ_OFF arm used to `return (ENOENT)' directly when
 * md_event_queue was NULL, leaking both md_eventq_mx (still held at
 * that point) and the kmem-allocated ioctl buffer.  It now drops the
 * mutex and exits through `out:' so the buffer is freed.
 */
/* ARGSUSED */
static int
notify_ioctl(dev_t dev, int icmd, void *ioctl_in, int mode, IOLOCK *lockp)
{
	int cmd;
	pid_t pid;
	md_event_queue_t *event_queue;
	md_event_t *event;
	cred_t *credp;
	char *q_name;
	int err = 0;
	size_t sz = 0;
	md_event_ioctl_t *ioctl;

	sz = sizeof (*ioctl);
	ioctl = kmem_zalloc(sz, KM_SLEEP);

	if (ddi_copyin(ioctl_in, (void *)ioctl, sz, mode)) {
		err = EFAULT;
		goto out;
	}

	/* reject mismatched revision or bad magic up front */
	if (ioctl->mdn_rev != MD_NOTIFY_REVISION) {
		err = EINVAL;
		goto out;
	}
	if (ioctl->mdn_magic != MD_EVENT_ID) {
		err = EINVAL;
		goto out;
	}

	pid = md_getpid();
	cmd = ioctl->mdn_cmd;
	q_name = ioctl->mdn_name;

	/* opportunistically reap dead queues when the count is high */
	if (((cmd != EQ_OFF) && (cmd != EQ_ON)) && (md_reap >= md_reap_count))
		md_reaper();

	/*
	 * All commands except EQ_ON/EQ_PUT operate on an existing
	 * queue: look it up now.  On success the mutex is HELD on
	 * entry to the switch below.
	 */
	if ((cmd != EQ_ON) && (cmd != EQ_PUT)) {
		mutex_enter(&md_eventq_mx);
		if ((event_queue = md_find_event_queue(q_name, 0)) == NULL) {
			mutex_exit(&md_eventq_mx);
			(void) notify_fillin_empty_ioctl
			    ((void *)ioctl, ioctl_in, sz, mode);
			err = ENOENT;
			goto out;
		}
	}

	switch (cmd) {
	case EQ_ON:
		md_reaper();

		mutex_enter(&md_eventq_mx);
		if (md_find_event_queue(q_name, 0) != NULL) {
			mutex_exit(&md_eventq_mx);
			err = EEXIST;
			break;
		}

		/* allocate and initialize queue head */
		event_queue = (md_event_queue_t *)
		    kmem_alloc(sizeof (md_event_queue_t), KM_NOSLEEP);
		if (event_queue == NULL) {
			mutex_exit(&md_eventq_mx);
			err = ENOMEM;
			break;
		}

		cv_init(&event_queue->mdn_cv, NULL, CV_DEFAULT, NULL);

		event_queue->mdn_flags = 0;
		event_queue->mdn_pid = pid;
		event_queue->mdn_proc = md_getproc();
		event_queue->mdn_size = 0;
		event_queue->mdn_front = NULL;
		event_queue->mdn_tail = NULL;
		event_queue->mdn_waiting = 0;
		event_queue->mdn_nextq = NULL;
		credp = ddi_get_cred();
		event_queue->mdn_uid = crgetuid(credp);
		bcopy(q_name, event_queue->mdn_name,
		    MD_NOTIFY_NAME_SIZE);
		if (ioctl->mdn_flags & EQ_Q_PERM)
			event_queue->mdn_flags |= MD_EVENT_QUEUE_PERM;

		/* link into the list of event queues */
		if (md_event_queue != NULL)
			event_queue->mdn_nextq = md_event_queue;
		md_event_queue = event_queue;
		mutex_exit(&md_eventq_mx);
		err = 0;
		break;

	case EQ_OFF:
		if (md_event_queue == NULL) {
			/*
			 * Must not return directly here: md_eventq_mx
			 * is held and ioctl must be freed at `out:'.
			 */
			mutex_exit(&md_eventq_mx);
			err = ENOENT;
			break;
		}

		event_queue->mdn_flags = MD_EVENT_QUEUE_DESTROY;
		event_queue->mdn_pid = 0;
		event_queue->mdn_proc = NULL;

		if (event_queue->mdn_waiting != 0)
			cv_broadcast(&event_queue->mdn_cv);

		/*
		 * force the reaper to delete this when it has no process
		 * waiting on it.
		 */
		mutex_exit(&md_eventq_mx);
		md_reaper();
		err = 0;
		break;

	case EQ_GET_NOWAIT:
	case EQ_GET_WAIT:
		if (cmd == EQ_GET_WAIT) {
			err = md_wait_for_event(event_queue, ioctl_in,
			    ioctl, sz, mode, lockp);
			if (err == EINTR)
				goto out;
		}
		ASSERT(MUTEX_HELD(&md_eventq_mx));
		if (event_queue->mdn_flags &
		    (MD_EVENT_QUEUE_INVALID | MD_EVENT_QUEUE_FULL)) {
			/* queue overflowed or was invalidated: report loss */
			event_queue->mdn_flags &=
			    ~(MD_EVENT_QUEUE_INVALID | MD_EVENT_QUEUE_FULL);
			mutex_exit(&md_eventq_mx);
			err = notify_fillin_empty_ioctl
			    ((void *)ioctl, ioctl_in, sz, mode);
			ioctl->mdn_event = EQ_NOTIFY_LOST;
			err = ddi_copyout((void *)ioctl, ioctl_in, sz, mode);
			if (err)
				err = EFAULT;
			goto out;
		}
		if (event_queue->mdn_front != NULL) {
			/* dequeue the head event and copy it out */
			event = event_queue->mdn_front;
			event_queue->mdn_front = event->mdn_next;
			event_queue->mdn_size--;
			if (event_queue->mdn_front == NULL)
				event_queue->mdn_tail = NULL;
			mutex_exit(&md_eventq_mx);
			ioctl->mdn_tag = event->mdn_tag;
			ioctl->mdn_set = event->mdn_set;
			ioctl->mdn_dev = event->mdn_dev;
			ioctl->mdn_event = event->mdn_event;
			ioctl->mdn_user = event->mdn_user;
			ioctl->mdn_time.tv_sec = event->mdn_time.tv_sec;
			ioctl->mdn_time.tv_usec = event->mdn_time.tv_usec;
			kmem_free(event, sizeof (md_event_t));
			err = ddi_copyout((void *)ioctl, ioctl_in, sz, mode);
			if (err)
				err = EFAULT;
			goto out;
		} else {	/* no elements on queue */
			mutex_exit(&md_eventq_mx);
			err = notify_fillin_empty_ioctl
			    ((void *)ioctl, ioctl_in, sz, mode);
			if (err)
				err = EFAULT;
		}

		if (cmd == EQ_GET_NOWAIT)
			err = EAGAIN;
		goto out;

	case EQ_PUT:
		if (!md_event_queue) {
			err = ENOENT;
			break;
		}
		md_put_event(ioctl->mdn_tag,
		    ioctl->mdn_set, ioctl->mdn_dev,
		    ioctl->mdn_event, ioctl->mdn_user);
		err = 0;
		goto out;

	default:
		err = EINVAL;
		goto out;
	}

out:
	kmem_free(ioctl, sz);
	return (err);
}