/* ARGSUSED */
/*
 * crypto ioctl: return the list of software provider names to userland.
 *
 * The request header is copied in via the STRUCT_* macros (handling both
 * 32- and 64-bit callers), the packed name list ('count' names in 'len'
 * bytes) is fetched from the core crypto module, and the results are
 * copied back out.  EFAULT is returned only for copyin/copyout failures;
 * all other outcomes (CRYPTO_FAILED, CRYPTO_BUFFER_TOO_SMALL,
 * CRYPTO_SUCCESS) are reported through sl_return_value with a 0 return.
 */
static int
get_soft_list(dev_t dev, caddr_t arg, int mode, int *rval)
{
	STRUCT_DECL(crypto_get_soft_list, soft_list);
	char *names;
	size_t len;
	uint_t count;

	STRUCT_INIT(soft_list, mode);

	if (copyin(arg, STRUCT_BUF(soft_list), STRUCT_SIZE(soft_list)) != 0)
		return (EFAULT);

	/* get the list from the core module */
	if (crypto_get_soft_list(&count, &names, &len) != 0) {
		STRUCT_FSET(soft_list, sl_return_value, CRYPTO_FAILED);
		if (copyout(STRUCT_BUF(soft_list), arg,
		    STRUCT_SIZE(soft_list)) != 0) {
			return (EFAULT);
		}
		return (0);
	}

	/* check if buffer is too small */
	if (len > STRUCT_FGET(soft_list, sl_soft_len)) {
		/* report required sizes so the caller can retry */
		STRUCT_FSET(soft_list, sl_soft_count, count);
		STRUCT_FSET(soft_list, sl_soft_len, len);
		STRUCT_FSET(soft_list, sl_return_value,
		    CRYPTO_BUFFER_TOO_SMALL);
		kmem_free(names, len);
		if (copyout(STRUCT_BUF(soft_list), arg,
		    STRUCT_SIZE(soft_list)) != 0) {
			return (EFAULT);
		}
		return (0);
	}

	STRUCT_FSET(soft_list, sl_soft_count, count);
	STRUCT_FSET(soft_list, sl_soft_len, len);
	STRUCT_FSET(soft_list, sl_return_value, CRYPTO_SUCCESS);

	/* copy the packed names into the caller-supplied buffer */
	if (count > 0 && copyout(names,
	    STRUCT_FGETP(soft_list, sl_soft_names), len) != 0) {
		kmem_free(names, len);
		return (EFAULT);
	}
	kmem_free(names, len);

	/* finally, copy the updated header back out */
	if (copyout(STRUCT_BUF(soft_list), arg, STRUCT_SIZE(soft_list)) != 0) {
		return (EFAULT);
	}
	return (0);
}
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 *
 * 'info_p' is the userland buffer; 'len' is its size and must be at
 * least STRUCT_SIZE(auditinfo_addr) for the caller's data model.
 * Returns EPERM without privilege, EOVERFLOW when the buffer is too
 * small or when the 64-bit terminal device cannot be compressed for a
 * 32-bit caller, and EINVAL when no audit info exists on the cred.
 */
int
getaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	const auditinfo_addr_t *ainfo;
	model_t model;

	/* reading audit attributes requires privilege */
	if (secpolicy_audit_getattr(CRED(), B_FALSE) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	ainfo = crgetauinfo(CRED());
	if (ainfo == NULL)
		return (EINVAL);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else
		STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type, ainfo->ai_termid.at_type);
	/* copy all four address words (covers both IPv4 and IPv6 termids) */
	STRUCT_FSET(info, ai_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * Copy the zone's kernel audit context (auk_info) out to userland.
 *
 * Mirrors getaudit_addr() but sources the data from the per-zone kernel
 * audit context rather than the process credential.  Returns EOVERFLOW
 * when the caller's buffer is too small or when the 64-bit terminal
 * device cannot be compressed for a 32-bit caller; EFAULT on copyout
 * failure.
 */
static int
getkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t *kctx = GET_KCTX_PZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, kctx->auk_info.ai_auid);
	STRUCT_FSET(info, ai_mask, kctx->auk_info.ai_namask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, kctx->auk_info.ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else {
		STRUCT_FSET(info, ai_termid.at_port,
		    kctx->auk_info.ai_termid.at_port);
	}
#else
	STRUCT_FSET(info, ai_termid.at_port, kctx->auk_info.ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type,
	    kctx->auk_info.ai_termid.at_type);
	/* copy all four address words (covers both IPv4 and IPv6 termids) */
	STRUCT_FSET(info, ai_termid.at_addr[0],
	    kctx->auk_info.ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1],
	    kctx->auk_info.ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2],
	    kctx->auk_info.ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3],
	    kctx->auk_info.ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, kctx->auk_info.ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * Set the audit preselection mask of an arbitrary process (by pid).
 *
 * Copies in an auditpinfo structure, locates the target process, and
 * installs a fresh credential carrying the new mask.  Lock order is
 * pidlock -> p_lock -> p_crlock; p_lock is held across the cred swap so
 * the process cannot exit underneath us.  Any pending PAD_SETMASK
 * request from setsmask() is cancelled, since this explicit per-process
 * update supersedes it.
 */
static int
setpmask(caddr_t data)
{
	STRUCT_DECL(auditpinfo, apinfo);
	proc_t *proc;
	cred_t *newcred;
	auditinfo_addr_t *ainfo;
	struct p_audit_data *pad;
	model_t model;

	model = get_udatamodel();
	STRUCT_INIT(apinfo, model);

	if (copyin(data, STRUCT_BUF(apinfo), STRUCT_SIZE(apinfo)))
		return (EFAULT);

	mutex_enter(&pidlock);
	if ((proc = prfind(STRUCT_FGET(apinfo, ap_pid))) == NULL) {
		mutex_exit(&pidlock);
		return (ESRCH);
	}
	mutex_enter(&proc->p_lock);	/* so process doesn't go away */
	mutex_exit(&pidlock);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		mutex_exit(&proc->p_lock);
		crfree(newcred);
		return (EINVAL);
	}

	/* swap the target's cred for one carrying the new mask */
	mutex_enter(&proc->p_crlock);
	crcopy_to(proc->p_cred, newcred);
	proc->p_cred = newcred;

	ainfo->ai_mask = STRUCT_FGET(apinfo, ap_mask);

	/*
	 * Unlock. No need to broadcast changes via set_proc_pre_sys(),
	 * since t_pre_sys is ALWAYS on when audit is enabled... due to
	 * syscall auditing.
	 */
	crfree(newcred);
	mutex_exit(&proc->p_crlock);

	/* Reset flag for any previous pending mask change; this supercedes */
	pad = P2A(proc);
	ASSERT(pad != NULL);
	mutex_enter(&(pad->pad_lock));
	pad->pad_flags &= ~PAD_SETMASK;
	mutex_exit(&(pad->pad_lock));

	mutex_exit(&proc->p_lock);

	return (0);
}
/*
 * Set the audit state information for the current process.
 * Return EFAULT if copyin fails.
 *
 * Uses the legacy (IPv4-only) auditinfo structure: the terminal type is
 * forced to AU_IPv4 and only a single address word is recorded.  The
 * new state is installed by swapping in a fresh credential under
 * p_crlock and broadcasting it with crset().
 */
int
setaudit(caddr_t info_p)
{
	STRUCT_DECL(auditinfo, info);
	proc_t *p;
	cred_t *newcred;
	model_t model;
	auditinfo_addr_t *ainfo;

	/* changing audit state requires audit-configuration privilege */
	if (secpolicy_audit_config(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		crfree(newcred);
		return (EINVAL);
	}

	/* grab p_crlock and switch to new cred */
	p = curproc;
	mutex_enter(&p->p_crlock);
	crcopy_to(p->p_cred, newcred);
	p->p_cred = newcred;

	/* Set audit mask, id, termid and session id as specified */
	ainfo->ai_auid = STRUCT_FGET(info, ai_auid);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		ainfo->ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.port));
	else
		ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.port);
#else
	ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.port);
#endif
	ainfo->ai_termid.at_type = AU_IPv4;
	ainfo->ai_termid.at_addr[0] = STRUCT_FGET(info, ai_termid.machine);
	ainfo->ai_asid = STRUCT_FGET(info, ai_asid);
	ainfo->ai_mask = STRUCT_FGET(info, ai_mask);

	/* unlock and broadcast the cred changes */
	mutex_exit(&p->p_crlock);
	crset(p, newcred);

	return (0);
}
/*ARGSUSED*/
/*
 * cpuid driver ioctl entry point.
 *
 * CPUID_GET_HWCAP: copy in a cpuid_get_hwcap request, compare the
 * caller-supplied architecture name against the native (and, where
 * built, the 32-bit) architecture string, and return the matching
 * hardware-capability word (0 when neither matches).  All other
 * commands return ENOTTY.
 */
static int
cpuid_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval)
{
	char areq[16];
	void *ustr;

	switch (cmd) {
	case CPUID_GET_HWCAP: {
		STRUCT_DECL(cpuid_get_hwcap, h);
		STRUCT_INIT(h, mode);

		if (ddi_copyin((void *)arg, STRUCT_BUF(h),
		    STRUCT_SIZE(h), mode))
			return (EFAULT);

		/*
		 * Fix: when cgh_archname is NULL, copyinstr() is never
		 * run and areq was previously compared while still
		 * uninitialized (only its last byte was set below).
		 * Start with an empty string so the strcmp()s always
		 * operate on well-defined data.
		 */
		areq[0] = '\0';
		if ((ustr = STRUCT_FGETP(h, cgh_archname)) != NULL &&
		    copyinstr(ustr, areq, sizeof (areq), NULL) != 0)
			return (EFAULT);
		/* guarantee termination even on a partial copy */
		areq[sizeof (areq) - 1] = '\0';

		if (strcmp(areq, architecture) == 0)
			STRUCT_FSET(h, cgh_hwcap, auxv_hwcap);
#if defined(_SYSCALL32_IMPL)
		else if (strcmp(areq, architecture_32) == 0)
			STRUCT_FSET(h, cgh_hwcap, auxv_hwcap32);
#endif
		else
			STRUCT_FSET(h, cgh_hwcap, 0);

		if (ddi_copyout(STRUCT_BUF(h), (void *)arg,
		    STRUCT_SIZE(h), mode))
			return (EFAULT);
		return (0);
	}

	default:
		return (ENOTTY);
	}
}
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 *
 * Legacy (IPv4-only) variant: if the process's terminal id is IPv6 it
 * cannot be represented in the old auditinfo structure, so EOVERFLOW is
 * returned and the caller must use the _addr interface instead.
 */
static int
getaudit(caddr_t info_p)
{
	STRUCT_DECL(auditinfo, info);
	const auditinfo_addr_t *ainfo;
	model_t model;

	/* reading audit attributes requires privilege */
	if (secpolicy_audit_getattr(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	ainfo = crgetauinfo(CRED());
	if (ainfo == NULL)
		return (EINVAL);

	/* trying to read a process with an IPv6 address? */
	if (ainfo->ai_termid.at_type == AU_IPv6)
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.port, dev);
	} else
		STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#endif
	/* only a single IPv4 address word fits in the legacy structure */
	STRUCT_FSET(info, ai_termid.machine, ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * Return the audit queue control parameters (high/low water marks,
 * buffer size, delay) of the zone's kernel audit context to userland.
 * Returns EFAULT if the copyout fails, 0 otherwise.
 */
static int
getqctrl(caddr_t data)
{
	STRUCT_DECL(au_qctrl, qc);
	au_kcontext_t *kctx = GET_KCTX_PZ;

	STRUCT_INIT(qc, get_udatamodel());

	/* snapshot all four parameters atomically under the queue lock */
	mutex_enter(&(kctx->auk_queue.lock));
	STRUCT_FSET(qc, aq_hiwater, kctx->auk_queue.hiwater);
	STRUCT_FSET(qc, aq_lowater, kctx->auk_queue.lowater);
	STRUCT_FSET(qc, aq_bufsz, kctx->auk_queue.bufsz);
	STRUCT_FSET(qc, aq_delay, kctx->auk_queue.delay);
	mutex_exit(&(kctx->auk_queue.lock));

	return (copyout(STRUCT_BUF(qc), data, STRUCT_SIZE(qc)) ? EFAULT : 0);
}
/*ARGSUSED*/
/*
 * Per-CPU cpc device ioctl handler.  The minor number encodes the CPU;
 * the calling thread must still be bound to that CPU or EAGAIN is
 * returned.  CPCIO_BIND and CPCIO_SAMPLE pass a __cpc_args structure
 * whose three user pointers are interpreted per command (see the
 * comments at each case).
 */
static int
kcpc_ioctl(dev_t dev, int cmd, intptr_t data, int flags, cred_t *cr, int *rvp)
{
	kthread_t *t = curthread;
	processorid_t cpuid;
	void *udata1 = NULL;
	void *udata2 = NULL;
	void *udata3 = NULL;
	int error;
	int code;

	STRUCT_DECL(__cpc_args, args);

	STRUCT_INIT(args, flags);

	if (curthread->t_bind_cpu != getminor(dev))
		return (EAGAIN);  /* someone unbound it? */

	cpuid = getminor(dev);

	/* only BIND and SAMPLE carry an argument structure */
	if (cmd == CPCIO_BIND || cmd == CPCIO_SAMPLE) {
		if (copyin((void *)data, STRUCT_BUF(args),
		    STRUCT_SIZE(args)) == -1)
			return (EFAULT);

		udata1 = STRUCT_FGETP(args, udata1);
		udata2 = STRUCT_FGETP(args, udata2);
		udata3 = STRUCT_FGETP(args, udata3);
	}

	switch (cmd) {
	case CPCIO_BIND:
		/*
		 * udata1 = pointer to packed nvlist buffer
		 * udata2 = size of packed nvlist buffer
		 * udata3 = User addr to return error subcode in.
		 */
		/* an existing bound set is torn down before rebinding */
		if (t->t_cpc_set != NULL) {
			(void) kcpc_unbind(t->t_cpc_set);
			ASSERT(t->t_cpc_set == NULL);
		}

		if ((error = kcpc_copyin_set(&t->t_cpc_set, udata1,
		    (size_t)udata2)) != 0) {
			return (error);
		}

		if ((error = kcpc_verify_set(t->t_cpc_set)) != 0) {
			kcpc_free_set(t->t_cpc_set);
			t->t_cpc_set = NULL;
			if (copyout(&error, udata3, sizeof (error)) == -1)
				return (EFAULT);
			return (EINVAL);
		}

		if ((error = kcpc_bind_cpu(t->t_cpc_set, cpuid, &code)) != 0) {
			kcpc_free_set(t->t_cpc_set);
			t->t_cpc_set = NULL;
			/*
			 * Subcodes are only returned for EINVAL and EACCESS.
			 */
			if ((error == EINVAL || error == EACCES) &&
			    copyout(&code, udata3, sizeof (code)) == -1)
				return (EFAULT);
			return (error);
		}

		return (0);
	case CPCIO_SAMPLE:
		/*
		 * udata1 = pointer to user's buffer
		 * udata2 = pointer to user's hrtime
		 * udata3 = pointer to user's tick
		 */
		/*
		 * Only CPU-bound sets may be sampled via the ioctl(). If this
		 * set has no CPU-bound context, return an error.
		 */
		if (t->t_cpc_set == NULL)
			return (EINVAL);
		if ((error = kcpc_sample(t->t_cpc_set, udata1, udata2,
		    udata3)) != 0)
			return (error);
		return (0);
	case CPCIO_RELE:
		if (t->t_cpc_set == NULL)
			return (EINVAL);
		return (kcpc_unbind(t->t_cpc_set));
	default:
		return (EINVAL);
	}
}
/*
 * Set the audit mask of every process belonging to the given audit
 * session (asid).
 *
 * Walks the active-process list under pidlock.  Rather than rewriting
 * each process's cred here, a matching process whose mask differs gets
 * PAD_SETMASK and the new mask recorded in its per-process audit data;
 * the change is picked up at that process's next syscall entry.
 */
static int
setsmask(caddr_t data)
{
	STRUCT_DECL(auditinfo, user_info);
	struct proc *p;
	const auditinfo_addr_t *ainfo;
	model_t model;

	/* setsmask not applicable in non-global zones without perzone policy */
	if (!(audit_policy & AUDIT_PERZONE) && (!INGLOBALZONE(curproc)))
		return (EINVAL);

	model = get_udatamodel();
	STRUCT_INIT(user_info, model);

	if (copyin(data, STRUCT_BUF(user_info), STRUCT_SIZE(user_info)))
		return (EFAULT);

	mutex_enter(&pidlock);	/* lock the process queue against updates */
	for (p = practive; p != NULL; p = p->p_next) {
		cred_t	*cr;

		/* if in non-global zone only modify processes in same zone */
		if (!HASZONEACCESS(curproc, p->p_zone->zone_id))
			continue;

		mutex_enter(&p->p_lock);	/* so process doesn't go away */
		/* skip system processes and ones being created or going away */
		if (p->p_stat == SIDL || p->p_stat == SZOMB ||
		    (p->p_flag & (SSYS | SEXITING | SEXITLWPS))) {
			mutex_exit(&p->p_lock);
			continue;
		}

		/* take a hold on the cred so it can be inspected unlocked */
		mutex_enter(&p->p_crlock);
		crhold(cr = p->p_cred);
		mutex_exit(&p->p_crlock);
		ainfo = crgetauinfo(cr);
		if (ainfo == NULL) {
			mutex_exit(&p->p_lock);
			crfree(cr);
			continue;
		}

		if (ainfo->ai_asid == STRUCT_FGET(user_info, ai_asid)) {
			au_mask_t	mask;
			int		err;

			/*
			 * Here's a process which matches the specified asid.
			 * If its mask doesn't already match the new mask,
			 * save the new mask in the pad, to be picked up
			 * next syscall.
			 */
			mask = STRUCT_FGET(user_info, ai_mask);
			err = bcmp(&mask, &ainfo->ai_mask, sizeof (au_mask_t));
			crfree(cr);
			if (err != 0) {
				struct p_audit_data *pad = P2A(p);
				ASSERT(pad != NULL);

				mutex_enter(&(pad->pad_lock));
				pad->pad_flags |= PAD_SETMASK;
				pad->pad_newmask = mask;
				mutex_exit(&(pad->pad_lock));

				/*
				 * No need to call set_proc_pre_sys(), since
				 * t_pre_sys is ALWAYS on when audit is
				 * enabled...due to syscall auditing.
				 */
			}
		} else {
			crfree(cr);
		}
		mutex_exit(&p->p_lock);
	}
	mutex_exit(&pidlock);

	return (0);
}
/*
 * Set the audit queue control parameters (high/low water marks, buffer
 * size, delay) for the zone's kernel audit context.
 *
 * The caller-supplied values are range-checked before anything is
 * touched, then all four parameters are installed atomically under the
 * queue lock.  Readers or writers blocked on the old limits are woken
 * if the new limits unblock them.  Returns EINVAL for out-of-range
 * values (or when run in a non-global zone without perzone policy) and
 * EFAULT if the copyin fails.
 */
static int
setqctrl(caddr_t data)
{
	au_kcontext_t	*kctx;
	struct au_qctrl	q;
	STRUCT_DECL(au_qctrl, qctrl);

	STRUCT_INIT(qctrl, get_udatamodel());

	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);
	kctx = GET_KCTX_NGZ;

	if (copyin(data, STRUCT_BUF(qctrl), STRUCT_SIZE(qctrl)))
		return (EFAULT);

	q.aq_hiwater = (size_t)STRUCT_FGET(qctrl, aq_hiwater);
	q.aq_lowater = (size_t)STRUCT_FGET(qctrl, aq_lowater);
	q.aq_bufsz = (size_t)STRUCT_FGET(qctrl, aq_bufsz);
	q.aq_delay = (clock_t)STRUCT_FGET(qctrl, aq_delay);

	/* enforce sane values: reject anything out of range up front */
	if (q.aq_hiwater <= q.aq_lowater ||
	    q.aq_hiwater < AQ_LOWATER ||
	    q.aq_hiwater > AQ_MAXHIGH ||
	    q.aq_bufsz < AQ_BUFSZ ||
	    q.aq_bufsz > AQ_MAXBUFSZ ||
	    q.aq_delay == 0 ||
	    q.aq_delay > AQ_MAXDELAY)
		return (EINVAL);

	/* update everything at once so things are consistant */
	mutex_enter(&(kctx->auk_queue.lock));
	kctx->auk_queue.hiwater = q.aq_hiwater;
	kctx->auk_queue.lowater = q.aq_lowater;
	kctx->auk_queue.bufsz = q.aq_bufsz;
	kctx->auk_queue.delay = q.aq_delay;

	/* wake any readers now above the (possibly lowered) low water mark */
	if (kctx->auk_queue.rd_block &&
	    kctx->auk_queue.cnt > kctx->auk_queue.lowater)
		cv_broadcast(&(kctx->auk_queue.read_cv));

	/* wake any writers now below the (possibly raised) high water mark */
	if (kctx->auk_queue.wt_block &&
	    kctx->auk_queue.cnt < kctx->auk_queue.hiwater)
		cv_broadcast(&(kctx->auk_queue.write_cv));

	mutex_exit(&(kctx->auk_queue.lock));

	return (0);
}
/*
 * Thin convenience wrapper: return the size (in bytes) of the named
 * structure as reported by STRUCT_SIZE().
 */
static inline int
kl_struct_len(char *sname)
{
	return (STRUCT_SIZE(sname));
}
/*
 * the host address for AUDIT_PERZONE == 0 is that of the global
 * zone and for local zones it is of the current zone.
 *
 * Copy an auditinfo_addr structure in from userland and install it as
 * the zone's kernel audit context (auk_info).  Only AU_IPv4/AU_IPv6
 * terminal types are accepted; a V4-mapped IPv6 address is normalized
 * to its IPv4 form.  auk_hostaddr_valid records whether a usable
 * (non-wildcard) host address was supplied.
 */
static int
setkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t	*kctx;

	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);

	kctx = GET_KCTX_NGZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	/* reject anything other than an IPv4 or IPv6 terminal id */
	if ((STRUCT_FGET(info, ai_termid.at_type) != AU_IPv4) &&
	    (STRUCT_FGET(info, ai_termid.at_type) != AU_IPv6))
		return (EINVAL);

	/* Set audit mask, termid and session id as specified */
	kctx->auk_info.ai_auid = STRUCT_FGET(info, ai_auid);
	kctx->auk_info.ai_namask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		kctx->auk_info.ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		kctx->auk_info.ai_termid.at_port =
		    STRUCT_FGET(info, ai_termid.at_port);
#else
	kctx->auk_info.ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	kctx->auk_info.ai_termid.at_type =
	    STRUCT_FGET(info, ai_termid.at_type);
	/* clear the whole address before filling in the supplied words */
	bzero(&kctx->auk_info.ai_termid.at_addr[0],
	    sizeof (kctx->auk_info.ai_termid.at_addr));
	kctx->auk_info.ai_termid.at_addr[0] =
	    STRUCT_FGET(info, ai_termid.at_addr[0]);
	kctx->auk_info.ai_termid.at_addr[1] =
	    STRUCT_FGET(info, ai_termid.at_addr[1]);
	kctx->auk_info.ai_termid.at_addr[2] =
	    STRUCT_FGET(info, ai_termid.at_addr[2]);
	kctx->auk_info.ai_termid.at_addr[3] =
	    STRUCT_FGET(info, ai_termid.at_addr[3]);
	kctx->auk_info.ai_asid = STRUCT_FGET(info, ai_asid);

	/* normalize a V4-mapped IPv6 address to plain IPv4 */
	if (kctx->auk_info.ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(
	    ((in6_addr_t *)kctx->auk_info.ai_termid.at_addr))) {
		kctx->auk_info.ai_termid.at_type = AU_IPv4;
		kctx->auk_info.ai_termid.at_addr[0] =
		    kctx->auk_info.ai_termid.at_addr[3];
		kctx->auk_info.ai_termid.at_addr[1] = 0;
		kctx->auk_info.ai_termid.at_addr[2] = 0;
		kctx->auk_info.ai_termid.at_addr[3] = 0;
	}

	/* a wildcard (unspecified/INADDR_ANY) address is not "valid" */
	if (kctx->auk_info.ai_termid.at_type == AU_IPv6)
		kctx->auk_hostaddr_valid = IN6_IS_ADDR_UNSPECIFIED(
		    (in6_addr_t *)kctx->auk_info.ai_termid.at_addr) ? 0 : 1;
	else
		kctx->auk_hostaddr_valid =
		    (kctx->auk_info.ai_termid.at_addr[0] ==
		    htonl(INADDR_ANY)) ? 0 : 1;

	return (0);
}
/* ARGSUSED */
/*
 * Direct-attached disk ioctl handler.
 *
 * Handles defect-list retrieval (DKIOCGETDEF), asynchronous raw
 * read/write passthrough (DIOCTL_RWCMD), privileged firmware update
 * (DKIOC_UPDATEFW) and write-cache flushing (DKIOCFLUSHWRITECACHE)
 * directly.  Anything else goes to dadk_ctl_ioctl() for fixed media,
 * or falls through to the removable-media/CD-ROM command table at the
 * bottom for removable devices.
 */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
    cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
	    {
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		/* build a one-sector request aimed at the defect list */
		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somwhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
	    }
	case DIOCTL_RWCMD:
	    {
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases ,which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, while
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
		case DADKIO_RWCMD_READ :
			/*FALLTHROUGH*/
		case DADKIO_RWCMD_WRITE:
			rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
			    B_WRITE : B_READ);
			status = dadk_dk_buf_setup(dadkp,
			    (opaque_t)rwcmdp, dev, ((flag & FKIOCTL) ?
			    UIO_SYSSPACE : UIO_USERSPACE), rw);
			return (status);
		default:
			return (EINVAL);
		}
	    }
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
	    {
		struct buf *bp;
		int err = 0;
		struct dk_callback *dkc = (struct dk_callback *)arg;
		struct cmpkt *pktp;
		int is_sync = 1;

		mutex_enter(&dadkp->dad_mutex);
		if (dadkp->dad_noflush || !  dadkp->dad_wce) {
			err = dadkp->dad_noflush ? ENOTSUP : 0;
			mutex_exit(&dadkp->dad_mutex);
			/*
			 * If a callback was requested: a
			 * callback will always be done if the
			 * caller saw the DKIOCFLUSHWRITECACHE
			 * ioctl return 0, and never done if the
			 * caller saw the ioctl return an error.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				(*dkc->dkc_callback)(dkc->dkc_cookie,
				    err);
				/*
				 * Did callback and reported error.
				 * Since we did a callback, ioctl
				 * should return 0.
				 */
				err = 0;
			}
			return (err);
		}
		mutex_exit(&dadkp->dad_mutex);

		bp = getrbuf(KM_SLEEP);

		/* zero-length request: only the flush command matters */
		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		SET_BP_SEC(bp, 0);

		/*
		 * An in-kernel caller supplying a callback gets an
		 * asynchronous flush; the callback is invoked from
		 * dadk_flushdone on completion.
		 */
		if ((flag & FKIOCTL) && dkc != NULL &&
		    dkc->dkc_callback != NULL) {
			struct dk_callback *dkc2 =
			    (struct dk_callback *)kmem_zalloc(
			    sizeof (struct dk_callback), KM_SLEEP);

			bcopy(dkc, dkc2, sizeof (*dkc2));
			bp->b_private = dkc2;
			bp->b_iodone = dadk_flushdone;
			is_sync = 0;
		}

		/*
		 * Setup command pkt
		 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
		 */
		pktp = dadk_pktprep(dadkp, NULL, bp,
		    dadk_iodone, DDI_DMA_SLEEP, NULL);

		pktp->cp_time = DADK_FLUSH_CACHE_TIME;

		*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
		pktp->cp_byteleft = 0;
		pktp->cp_private = NULL;
		pktp->cp_secleft = 0;
		pktp->cp_srtsec = -1;
		pktp->cp_bytexfer = 0;

		CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);

		if (is_sync) {
			err = biowait(bp);
			freerbuf(bp);
		}
		return (err);
	    }
	default:
		/* fixed media: everything else goes to the ctl object */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media / CD-ROM commands */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			/* unlock the door first, then eject the media */
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	/* cdrom audio commands fall through to here with cmd remapped */
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
/*
 * Set the audit state information for the current process.
 * Return EFAULT if copyin fails.
 *
 * Extended (IPv4/IPv6-capable) variant of setaudit(): the terminal
 * type selects how many 32-bit address words are copied (type is the
 * address length in bytes, so AU_IPv4 copies one word, AU_IPv6 four).
 * A V4-mapped IPv6 address is normalized to plain IPv4.  The new state
 * is installed by swapping in a fresh credential and broadcasting it
 * via crset().
 */
int
setaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	proc_t *p;
	cred_t	*newcred;
	model_t	model;
	int	i;
	int	type;
	auditinfo_addr_t	*ainfo;

	/* changing audit state requires audit-configuration privilege */
	if (secpolicy_audit_config(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	type = STRUCT_FGET(info, ai_termid.at_type);
	if ((type != AU_IPv4) && (type != AU_IPv6))
		return (EINVAL);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		crfree(newcred);
		return (EINVAL);
	}

	/* grab p_crlock and switch to new cred */
	p = curproc;
	mutex_enter(&p->p_crlock);
	crcopy_to(p->p_cred, newcred);
	p->p_cred = newcred;

	/* Set audit mask, id, termid and session id as specified */
	ainfo->ai_auid = STRUCT_FGET(info, ai_auid);
	ainfo->ai_mask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		ainfo->ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#else
	ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	ainfo->ai_termid.at_type = type;
	/* clear the whole address, then copy type/4 words of it */
	bzero(&ainfo->ai_termid.at_addr[0], sizeof (ainfo->ai_termid.at_addr));
	for (i = 0; i < (type/sizeof (int)); i++)
		ainfo->ai_termid.at_addr[i] =
		    STRUCT_FGET(info, ai_termid.at_addr[i]);

	/* normalize a V4-mapped IPv6 address to plain IPv4 */
	if (ainfo->ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(((in6_addr_t *)ainfo->ai_termid.at_addr))) {
		ainfo->ai_termid.at_type = AU_IPv4;
		ainfo->ai_termid.at_addr[0] = ainfo->ai_termid.at_addr[3];
		ainfo->ai_termid.at_addr[1] = 0;
		ainfo->ai_termid.at_addr[2] = 0;
		ainfo->ai_termid.at_addr[3] = 0;
	}

	ainfo->ai_asid = STRUCT_FGET(info, ai_asid);

	/* unlock and broadcast the cred changes */
	mutex_exit(&p->p_crlock);
	crset(p, newcred);

	return (0);
}
/* ARGSUSED */ int ufs_fioio( struct vnode *vp, /* any file on the fs */ struct fioio *fiou, /* fioio struct in userland */ int flag, /* flag from VOP_IOCTL() */ struct cred *cr) /* credentials from ufs_ioctl */ { int error = 0; struct vnode *vpio = NULL; /* vnode for inode open */ struct inode *ipio = NULL; /* inode for inode open */ struct file *fpio = NULL; /* file for inode open */ struct inode *ip; /* inode for file system */ struct fs *fs; /* fs for file system */ STRUCT_DECL(fioio, fio); /* copy of user's fioio struct */ /* * must be privileged */ if (secpolicy_fs_config(cr, vp->v_vfsp) != 0) return (EPERM); STRUCT_INIT(fio, flag & DATAMODEL_MASK); /* * get user's copy of fioio struct */ if (copyin(fiou, STRUCT_BUF(fio), STRUCT_SIZE(fio))) return (EFAULT); ip = VTOI(vp); fs = ip->i_fs; /* * check the inode number against the fs's inode number bounds */ if (STRUCT_FGET(fio, fio_ino) < UFSROOTINO) return (ESRCH); if (STRUCT_FGET(fio, fio_ino) >= fs->fs_ncg * fs->fs_ipg) return (ESRCH); rw_enter(&ip->i_ufsvfs->vfs_dqrwlock, RW_READER); /* * get the inode */ error = ufs_iget(ip->i_vfs, STRUCT_FGET(fio, fio_ino), &ipio, cr); rw_exit(&ip->i_ufsvfs->vfs_dqrwlock); if (error) return (error); /* * check the generation number */ rw_enter(&ipio->i_contents, RW_READER); if (ipio->i_gen != STRUCT_FGET(fio, fio_gen)) { error = ESTALE; rw_exit(&ipio->i_contents); goto errout; } /* * check if the inode is free */ if (ipio->i_mode == 0) { error = ENOENT; rw_exit(&ipio->i_contents); goto errout; } rw_exit(&ipio->i_contents); /* * Adapted from copen: get a file struct * Large Files: We open this file descriptor with FOFFMAX flag * set so that it will be like a large file open. 
*/ if (falloc(NULL, (FREAD|FOFFMAX), &fpio, STRUCT_FADDR(fio, fio_fd))) goto errout; /* * Adapted from vn_open: check access and then open the file */ vpio = ITOV(ipio); if (error = VOP_ACCESS(vpio, VREAD, 0, cr, NULL)) goto errout; if (error = VOP_OPEN(&vpio, FREAD, cr, NULL)) goto errout; /* * Adapted from copen: initialize the file struct */ fpio->f_vnode = vpio; /* * return the fd */ if (copyout(STRUCT_BUF(fio), fiou, STRUCT_SIZE(fio))) { error = EFAULT; goto errout; } setf(STRUCT_FGET(fio, fio_fd), fpio); mutex_exit(&fpio->f_tlock); return (0); errout: /* * free the file struct and fd */ if (fpio) { setf(STRUCT_FGET(fio, fio_fd), NULL); unfalloc(fpio); } /* * release the hold on the inode */ if (ipio) VN_RELE(ITOV(ipio)); return (error); }
/*
 * Emit assembler-visible constants: the page size, sizes of key kernel
 * objects, field byte offsets that low-level assembly must access
 * directly, and selected enum/constant values.  All output is produced
 * through the DEFINED_CONSTANT, VAR_SIZE, STRUCT_SIZE, FIELD_OFFSET1,
 * FIELD_OFFSET2, ENUM_VALUE and CONSTANT generator macros; the
 * machine-specific tail is pulled in via genConstantsArch.C.
 */
void
genConstants(void)
{
    DEFINED_CONSTANT(PG_SIZE, PAGE_SIZE);

    // sizes of per-processor local objects and lock primitives
    VAR_SIZE(allocLocal);
    VAR_SIZE(activeThrdCntLocal);
    VAR_SIZE(lTransTableLocal);
    VAR_SIZE(exceptionLocal);
    VAR_SIZE(kernelInfoLocal);
    VAR_SIZE(extRegsLocal);
    VAR_SIZE(BLock);
    VAR_SIZE(FairBLock);
    VAR_SIZE(Semaphore);
//  VAR_SIZE(FairBLockTraced);

    // structure sizes, each tagged with a short prefix
    STRUCT_SIZE(TH, Thread);
    STRUCT_SIZE(VS, VolatileState);
    STRUCT_SIZE(ER, ExpRegs);
    STRUCT_SIZE(EPL, EntryPointLauncher);

    // ExceptionLocal fields used by exception-path assembly
    FIELD_OFFSET1(EL, ExceptionLocal, currentProcessAnnex);
    FIELD_OFFSET1(EL, ExceptionLocal, exceptionStack);
    FIELD_OFFSET1(EL, ExceptionLocal, currentDebugStack);
    DEFINED_CONSTANT(KERN_THREAD_SIZE, ExceptionLocal::KernThreadSize);
    DEFINED_CONSTANT(KERN_PGFLT_STK_SPACE, ExceptionLocal::KernPgfltStkSpace);

    FIELD_OFFSET1(PA, ProcessAnnex, launcher);
    FIELD_OFFSET1(PA, ProcessAnnex, dispatcher);
    FIELD_OFFSET1(PA, ProcessAnnex, dispatcherUser);
    FIELD_OFFSET1(PA, ProcessAnnex, userStateOffset);
    FIELD_OFFSET1(PA, ProcessAnnex, trapStateOffset);

    FIELD_OFFSET1(TH, Thread, startSP);
    FIELD_OFFSET1(TH, Thread, curSP);
    FIELD_OFFSET1(TH, Thread, bottomSP);
    FIELD_OFFSET1(TH, Thread, truebottomSP);
    FIELD_OFFSET1(TH, Thread, altStack);
    FIELD_OFFSET1(TH, Thread, upcallNeeded);

    FIELD_OFFSET1(KI, KernelInfo, onSim);
    FIELD_OFFSET1(KI, KernelInfo, onHV);

    FIELD_OFFSET1(XR, ExtRegs, disabled);
    FIELD_OFFSET1(XR, ExtRegs, dispatcher);

    FIELD_OFFSET2(D, Dispatcher, interrupts, flags);
    FIELD_OFFSET1(D, Dispatcher, trapDisabledSave);
    FIELD_OFFSET1(D, Dispatcher, _userStateOffset);
    FIELD_OFFSET1(D, Dispatcher, _trapStateOffset);

    FIELD_OFFSET1(DD, DispatcherDefault, dispatcherStack);
    FIELD_OFFSET1(DD, DispatcherDefault, rescheduleNeeded);
    FIELD_OFFSET1(DD, DispatcherDefault, allowPrimitivePPCFlag);
    FIELD_OFFSET1(DD, DispatcherDefault, currentDebugStack);
    FIELD_OFFSET1(DD, DispatcherDefault, sandboxShepherd);

    DEFINED_CONSTANT(SCHED_DISPATCHER_SPACE, Scheduler::DISPATCHER_SPACE);

    // program-execution sizing constants
    DEFINED_CONSTANT(ProgExec_BOOT_STACK_SIZE, ProgExec::BOOT_STACK_SIZE);
    DEFINED_CONSTANT(ProgExec_THR_STK_SIZE, ProgExec::THREAD_SIZE);
    DEFINED_CONSTANT(ProgExec_INIT_MEM_SIZE, ProgExec::INIT_MEM_SIZE);
    DEFINED_CONSTANT(ProgExec_USR_STK_SIZE, ProgExec::USR_STACK_SIZE);

    // allocator internals
    FIELD_OFFSET1(AC, AllocCell, next);
    FIELD_OFFSET1(LM, LMalloc, freeList);
    FIELD_OFFSET1(LM, LMalloc, nodeID);
    FIELD_OFFSET1(LM, LMalloc, maxCount);
    FIELD_OFFSET1(LM, LMalloc, pool);
    FIELD_OFFSET1(LM, LMalloc, mallocID);
#ifdef ALLOC_STATS
    FIELD_OFFSET1(LM, LMalloc, allocs);
    FIELD_OFFSET1(LM, LMalloc, frees);
    FIELD_OFFSET1(LM, LMalloc, remoteFrees);
#endif

    // dispatcher entry-point indices
    CONSTANT(RUN_ENTRY);
    CONSTANT(INTERRUPT_ENTRY);
    CONSTANT(TRAP_ENTRY);
    CONSTANT(PGFLT_ENTRY);
    CONSTANT(IPC_CALL_ENTRY);
    CONSTANT(IPC_RTN_ENTRY);
    CONSTANT(IPC_FAULT_ENTRY);
    CONSTANT(SVC_ENTRY);

    /*
     * The remaining constants are needed only by the non-expedient
     * (hand-tuned assembly) implementations of the fast paths.
     */
#if !defined(USE_EXPEDIENT_USER_PGFLT) || \
    !defined(USE_EXPEDIENT_PPC) || \
    !defined(USE_EXPEDIENT_SCHEDULER) || \
    !defined(USE_EXPEDIENT_RESERVED_THREAD) || \
    !defined(USE_EXPEDIENT_USER_RESUME) || \
    !defined(USE_EXPEDIENT_INTERRUPT) || \
    !defined(USE_EXPEDIENT_SVC)
    FIELD_OFFSET1(EL, ExceptionLocal, kernelProcessAnnex);
    FIELD_OFFSET1(EL, ExceptionLocal, currentSegmentTable);
    FIELD_OFFSET1(EL, ExceptionLocal, trcInfoMask);
    FIELD_OFFSET1(EL, ExceptionLocal, trcInfoIndexMask);
    FIELD_OFFSET1(EL, ExceptionLocal, trcControl);
    FIELD_OFFSET1(EL, ExceptionLocal, trcArray);
    FIELD_OFFSET2(EL, ExceptionLocal, ipcTargetTable, _tableOffsetMask);
    FIELD_OFFSET2(EL, ExceptionLocal, ipcTargetTable, _table);
    DEFINED_CONSTANT(RD_HASH_OFFSET, IPCTargetTable::RD_HASH_OFFSET);
    FIELD_OFFSET2(EL, ExceptionLocal, dispatchQueue, cdaBorrowersTop);
    FIELD_OFFSET2(EL, ExceptionLocal, dispatchQueue, cdaBorrowers);
    DEFINED_CONSTANT(LOG_CDA_BORROWERS_SIZE,
		     DispatchQueue::LOG_CDA_BORROWERS_SIZE);
    DEFINED_CONSTANT(CDA_BORROWERS_SIZE, DispatchQueue::CDA_BORROWERS_SIZE);

    FIELD_OFFSET1(PA, ProcessAnnex, reservedThread);
    FIELD_OFFSET1(PA, ProcessAnnex, excStateOffset);
    FIELD_OFFSET1(PA, ProcessAnnex, segmentTable);
    FIELD_OFFSET1(PA, ProcessAnnex, commID);
    FIELD_OFFSET1(PA, ProcessAnnex, ipcTargetNext);
    FIELD_OFFSET1(PA, ProcessAnnex, isKernel);
    FIELD_OFFSET1(PA, ProcessAnnex, ppcTargetID);
    FIELD_OFFSET1(PA, ProcessAnnex, ppcThreadID);
    FIELD_OFFSET1(PA, ProcessAnnex, cpuDomainNext);
    DEFINED_CONSTANT(PPC_PRMTV_MARKER, uval(PPC_PRIMITIVE_MARKER));

    ENUM_VALUE(SOFTINTR, SoftIntr, PREEMPT);

    FIELD_OFFSET1(TH, Thread, next);
    FIELD_OFFSET1(TH, Thread, targetID);
    FIELD_OFFSET1(TH, Thread, threadID);
    FIELD_OFFSET1(TH, Thread, activeCntP);
    FIELD_OFFSET1(TH, Thread, groups);

    FIELD_OFFSET1(ATC, ActiveThrdCnt, genIndexAndActivationCnt);
    FIELD_OFFSET1(ATC, ActiveThrdCnt, activeCnt);
    DEFINED_CONSTANT(ATC_COUNT_BITS, ActiveThrdCnt::COUNT_BITS);

    FIELD_OFFSET1(D, Dispatcher, hasWork);
    FIELD_OFFSET1(D, Dispatcher, ipcFaultReason);

    FIELD_OFFSET1(DD, DispatcherDefault, freeList);
    FIELD_OFFSET1(DD, DispatcherDefault, threadArraySize);
    FIELD_OFFSET1(DD, DispatcherDefault, threadArray);
    FIELD_OFFSET1(DD, DispatcherDefault, preemptRequested);
    FIELD_OFFSET1(DD, DispatcherDefault, barredGroups);
    FIELD_OFFSET1(DD, DispatcherDefault, barredList);
    FIELD_OFFSET2(DD, DispatcherDefault, published, xhandleTable);
    FIELD_OFFSET2(DD, DispatcherDefault, published, xhandleTableLimit);

    FIELD_OFFSET1(XR, ExtRegs, ppcPageLength);

    // commID / virtual-processor encoding
    DEFINED_CONSTANT(VP_WILD, SysTypes::VP_WILD);
    DEFINED_CONSTANT(COMMID_VP_SHIFT, SysTypes::COMMID_VP_SHIFT);
    DEFINED_CONSTANT(PID_BITS, SysTypes::PID_BITS);
    DEFINED_CONSTANT(COMMID_PID_SHIFT, SysTypes::COMMID_PID_SHIFT);
    DEFINED_CONSTANT(COMMID_RD_SHIFT, SysTypes::COMMID_RD_SHIFT);
    DEFINED_CONSTANT(RD_MASK, SysTypes::RD_MASK);

    // external-object and xhandle layout
    ENUM_VALUE(XBO, XBaseObj, LOG_SIZE_IN_UVALS);
    ENUM_VALUE(XBO, XBaseObj, FIRST_METHOD);
    FIELD_OFFSET1(XBO, XBaseObj, seqNo);
    FIELD_OFFSET1(XBO, XBaseObj, __nummeth);
    ENUM_VALUE(VTE, COVTableEntry, LOG_SIZE_IN_UVALS);
    FIELD_OFFSET1(VTE, COVTableEntry, _func);
    DEFINED_CONSTANT(SEQNO_SHIFT, _XHANDLE_SEQNO_SHIFT);
    DEFINED_CONSTANT(SEQNO_BITS, _XHANDLE_SEQNO_BITS);
    DEFINED_CONSTANT(IDX_SHIFT, _XHANDLE_IDX_SHIFT);
    DEFINED_CONSTANT(IDX_BITS, _XHANDLE_IDX_BITS);

    // errno values exported for assembly use
    DEFINED_CONSTANT(ERRNO_INVAL, EINVAL);
    DEFINED_CONSTANT(ERRNO_NOMEM, ENOMEM);
    DEFINED_CONSTANT(ERRNO_PERM, EPERM);
    DEFINED_CONSTANT(ERRNO_AGAIN, EAGAIN);
    DEFINED_CONSTANT(ERRNO_SRCH, ESRCH);

    // tracing infrastructure layout and event encodings
    FIELD_OFFSET2(KI_TI, KernelInfo, traceInfo, mask);
    FIELD_OFFSET2(KI_TI, KernelInfo, traceInfo, indexMask);
    FIELD_OFFSET2(KI_TI, KernelInfo, traceInfo, traceControl);
    FIELD_OFFSET2(KI_TI, KernelInfo, traceInfo, traceArray);
    FIELD_OFFSET1(TC, TraceControl, index);
    FIELD_OFFSET1(TC, TraceControl, bufferCount);
    DEFINED_CONSTANT(TRC_BUFFER_NUMBER_BITS, TRACE_BUFFER_NUMBER_BITS);
    DEFINED_CONSTANT(TRC_BUFFER_OFFSET_BITS, TRACE_BUFFER_OFFSET_BITS);
    DEFINED_CONSTANT(TRC_BUFFER_OFFSET_MASK, TRACE_BUFFER_OFFSET_MASK);
    DEFINED_CONSTANT(TRC_TIMESTAMP_BITS, TRACE_TIMESTAMP_BITS);
    DEFINED_CONSTANT(TRC_TIMESTAMP_SHIFT, TRACE_TIMESTAMP_SHIFT);
    DEFINED_CONSTANT(TRC_LAYER_ID_BITS, TRACE_LAYER_ID_BITS);
    DEFINED_CONSTANT(TRC_LAYER_ID_SHIFT, TRACE_LAYER_ID_SHIFT);
    DEFINED_CONSTANT(TRC_MAJOR_ID_BITS, TRACE_MAJOR_ID_BITS);
    DEFINED_CONSTANT(TRC_MAJOR_ID_SHIFT, TRACE_MAJOR_ID_SHIFT);
    DEFINED_CONSTANT(TRC_LENGTH_BITS, TRACE_LENGTH_BITS);
    DEFINED_CONSTANT(TRC_LENGTH_SHIFT, TRACE_LENGTH_SHIFT);
    DEFINED_CONSTANT(TRC_DATA_BITS, TRACE_DATA_BITS);
    DEFINED_CONSTANT(TRC_DATA_SHIFT, TRACE_DATA_SHIFT);
    DEFINED_CONSTANT(TRC_K42_LAYER_ID, TRACE_K42_LAYER_ID);
    DEFINED_CONSTANT(TRC_EXCEPTION_MAJOR_ID, TRACE_EXCEPTION_MAJOR_ID);
    DEFINED_CONSTANT(TRC_EXCEPTION_PPC_CALL, TRACE_EXCEPTION_PPC_CALL);
    DEFINED_CONSTANT(TRC_EXCEPTION_PPC_RETURN, TRACE_EXCEPTION_PPC_RETURN);
    DEFINED_CONSTANT(TRC_SCHEDULER_MAJOR_ID, TRACE_SCHEDULER_MAJOR_ID);
    DEFINED_CONSTANT(TRC_SCHEDULER_CUR_THREAD, TRACE_SCHEDULER_CUR_THREAD);
    DEFINED_CONSTANT(TRC_SCHEDULER_PPC_XOBJ_FCT, TRACE_SCHEDULER_PPC_XOBJ_FCT);
#endif

// architecture-specific constants are generated by the per-arch file
#include __MINC(genConstantsArch.C)
}
/*
 * getpinfo_addr: fetch the audit state (auditinfo_addr) of an arbitrary
 * process, selected by the ap_pid field copied in from the user buffer,
 * and copy the result back out in the caller's data model (ILP32/LP64).
 *
 * data  - user address of a model-dependent auditpinfo_addr struct;
 *         ap_pid is read from it, all other fields are filled in.
 * len   - size of the user buffer.
 *
 * Returns 0 on success; EOVERFLOW if the buffer is too small or a 64-bit
 * terminal device cannot be represented in 32 bits; EFAULT on copyin/
 * copyout failure; ESRCH if the pid does not exist; EINVAL if the target
 * credential carries no audit info.
 */
static int
getpinfo_addr(caddr_t data, int len)
{
	STRUCT_DECL(auditpinfo_addr, apinfo);	/* model-sized work area */
	proc_t *proc;
	const auditinfo_addr_t *ainfo;
	model_t model;
	cred_t *cr, *newcred;

	model = get_udatamodel();
	STRUCT_INIT(apinfo, model);

	/* Reject a user buffer smaller than the model-sized struct. */
	if (len < STRUCT_SIZE(apinfo))
		return (EOVERFLOW);

	/* Copy in the request; only ap_pid is consumed from it. */
	if (copyin(data, STRUCT_BUF(apinfo), STRUCT_SIZE(apinfo)))
		return (EFAULT);

	/*
	 * Pre-allocate a credential before taking pidlock: cralloc() may
	 * sleep, which must not happen while holding the lock.
	 * NOTE(review): newcred appears to be consumed by
	 * audit_update_context() (it is only freed here on the ESRCH
	 * path) — confirm against that function's contract.
	 */
	newcred = cralloc();

	mutex_enter(&pidlock);
	if ((proc = prfind(STRUCT_FGET(apinfo, ap_pid))) == NULL) {
		mutex_exit(&pidlock);
		crfree(newcred);
		return (ESRCH);
	}
	mutex_enter(&proc->p_lock);	/* so process doesn't go away */
	mutex_exit(&pidlock);

	audit_update_context(proc, newcred); /* make sure it's up-to-date */

	/*
	 * Hold the target's credential so all process locks can be
	 * dropped before the (potentially faulting) copyout below.
	 */
	mutex_enter(&proc->p_crlock);
	crhold(cr = proc->p_cred);
	mutex_exit(&proc->p_crlock);
	mutex_exit(&proc->p_lock);

	ainfo = crgetauinfo(cr);
	if (ainfo == NULL) {
		crfree(cr);
		return (EINVAL);
	}

	STRUCT_FSET(apinfo, ap_auid, ainfo->ai_auid);
	STRUCT_FSET(apinfo, ap_asid, ainfo->ai_asid);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			crfree(cr);
			return (EOVERFLOW);
		}
		STRUCT_FSET(apinfo, ap_termid.at_port, dev);
	} else
		STRUCT_FSET(apinfo, ap_termid.at_port,
		    ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(apinfo, ap_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(apinfo, ap_termid.at_type, ainfo->ai_termid.at_type);
	STRUCT_FSET(apinfo, ap_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(apinfo, ap_mask, ainfo->ai_mask);

	crfree(cr);

	if (copyout(STRUCT_BUF(apinfo), data, STRUCT_SIZE(apinfo)))
		return (EFAULT);

	return (0);
}
/*
 * msgctl system call.
 *
 * gets q lock (via ipc_lookup), releases before return.
 * may call users of msg_lock
 *
 * Supported commands: IPC_SET/IPC_SET64 (update queue attributes),
 * IPC_STAT/IPC_STAT64 (snapshot queue state), IPC_RMID (remove queue).
 * The *64 variants use the fixed-layout msqid_ds64 instead of the
 * data-model-dependent msqid_ds.
 *
 * Returns 0 on success; on failure, sets errno (EFAULT, EINVAL, EPERM,
 * or whatever the ipc permission routines report) and returns -1 via
 * set_errno().
 */
static int
msgctl(int msgid, int cmd, void *arg)
{
	STRUCT_DECL(msqid_ds, ds);		/* SVR4 queue work area */
	kmsqid_t		*qp;		/* ptr to associated q */
	int			error;
	struct	cred		*cr;
	model_t	mdl = get_udatamodel();
	struct msqid_ds64	ds64;
	kmutex_t		*lock;
	proc_t			*pp = curproc;

	STRUCT_INIT(ds, mdl);
	cr = CRED();

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 * Copyins happen before the queue lock is taken so a page fault
	 * cannot occur while the lock is held.
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
			return (set_errno(EFAULT));
		break;

	case IPC_SET64:
		if (copyin(arg, &ds64, sizeof (struct msqid_ds64)))
			return (set_errno(EFAULT));
		break;

	case IPC_RMID:
		/* ipc_rmid does its own locking and permission checks. */
		if (error = ipc_rmid(msq_svc, msgid, cr))
			return (set_errno(error));
		return (0);
	}

	/*
	 * get msqid_ds for this msgid; on success the returned lock is
	 * held and must be released on every exit path below.
	 */
	if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL)
		return (set_errno(EINVAL));

	switch (cmd) {
	case IPC_SET:
		/* Raising msg_qbytes requires ipc-config privilege. */
		if (STRUCT_FGET(ds, msg_qbytes) > qp->msg_qbytes &&
		    secpolicy_ipc_config(cr) != 0) {
			mutex_exit(lock);
			return (set_errno(EPERM));
		}
		if (error = ipcperm_set(msq_svc, cr, &qp->msg_perm,
		    &STRUCT_BUF(ds)->msg_perm, mdl)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		qp->msg_qbytes = STRUCT_FGET(ds, msg_qbytes);
		qp->msg_ctime = gethrestime_sec();
		break;

	case IPC_STAT:
		if (error = ipcperm_access(&qp->msg_perm, MSG_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}

		/*
		 * Transiently fold the waiter flags into ipc_mode so
		 * they appear in the reported mode, then clear them.
		 */
		if (qp->msg_rcv_cnt)
			qp->msg_perm.ipc_mode |= MSG_RWAIT;
		if (qp->msg_snd_cnt)
			qp->msg_perm.ipc_mode |= MSG_WWAIT;
		ipcperm_stat(&STRUCT_BUF(ds)->msg_perm, &qp->msg_perm, mdl);
		qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
		STRUCT_FSETP(ds, msg_first, NULL);	/* kernel addr */
		STRUCT_FSETP(ds, msg_last, NULL);
		STRUCT_FSET(ds, msg_cbytes, qp->msg_cbytes);
		STRUCT_FSET(ds, msg_qnum, qp->msg_qnum);
		STRUCT_FSET(ds, msg_qbytes, qp->msg_qbytes);
		STRUCT_FSET(ds, msg_lspid, qp->msg_lspid);
		STRUCT_FSET(ds, msg_lrpid, qp->msg_lrpid);
		STRUCT_FSET(ds, msg_stime, qp->msg_stime);
		STRUCT_FSET(ds, msg_rtime, qp->msg_rtime);
		STRUCT_FSET(ds, msg_ctime, qp->msg_ctime);
		break;

	case IPC_SET64:
		/*
		 * An unprivileged increase of msg_qbytes is allowed only
		 * if it passes the process.max-msg-qbytes resource
		 * control; p_lock is required for rctl_test.
		 */
		mutex_enter(&pp->p_lock);
		if ((ds64.msgx_qbytes > qp->msg_qbytes) &&
		    secpolicy_ipc_config(cr) != 0 &&
		    rctl_test(rc_process_msgmnb, pp->p_rctls, pp,
		    ds64.msgx_qbytes, RCA_SAFE) & RCT_DENY) {
			mutex_exit(&pp->p_lock);
			mutex_exit(lock);
			return (set_errno(EPERM));
		}
		mutex_exit(&pp->p_lock);
		if (error = ipcperm_set64(msq_svc, cr, &qp->msg_perm,
		    &ds64.msgx_perm)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		qp->msg_qbytes = ds64.msgx_qbytes;
		qp->msg_ctime = gethrestime_sec();
		break;

	case IPC_STAT64:
		/*
		 * NOTE(review): unlike IPC_STAT, no MSG_R permission
		 * check is made here — confirm this is the intended
		 * contract for the *64 interface.
		 */
		if (qp->msg_rcv_cnt)
			qp->msg_perm.ipc_mode |= MSG_RWAIT;
		if (qp->msg_snd_cnt)
			qp->msg_perm.ipc_mode |= MSG_WWAIT;
		ipcperm_stat64(&ds64.msgx_perm, &qp->msg_perm);
		qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
		ds64.msgx_cbytes = qp->msg_cbytes;
		ds64.msgx_qnum = qp->msg_qnum;
		ds64.msgx_qbytes = qp->msg_qbytes;
		ds64.msgx_lspid = qp->msg_lspid;
		ds64.msgx_lrpid = qp->msg_lrpid;
		ds64.msgx_stime = qp->msg_stime;
		ds64.msgx_rtime = qp->msg_rtime;
		ds64.msgx_ctime = qp->msg_ctime;
		break;

	default:
		mutex_exit(lock);
		return (set_errno(EINVAL));
	}

	mutex_exit(lock);

	/*
	 * Do copyout last (after releasing mutex).
	 */
	switch (cmd) {
	case IPC_STAT:
		if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
			return (set_errno(EFAULT));
		break;

	case IPC_STAT64:
		if (copyout(&ds64, arg, sizeof (struct msqid_ds64)))
			return (set_errno(EFAULT));
		break;
	}

	return (0);
}
/*
 * semctl - Semctl system call.
 *
 * Handles the SysV semaphore control commands: IPC_SET/IPC_SET64,
 * IPC_STAT/IPC_STAT64, IPC_RMID, and the per-semaphore operations
 * GETNCNT, GETPID, GETVAL, GETALL, GETZCNT, SETVAL, SETALL.
 *
 * Locking: the lock returned by ipc_lookup() is held across the body of
 * each command and released before any copyout.  Copyins and the SETALL
 * buffer allocation are done before (re)taking the lock, so faults and
 * sleeping allocations never occur with the lock held.
 *
 * Returns the command's result (0, or a count/pid/value for the GET*
 * commands); on failure sets errno via set_errno().
 */
static int
semctl(int semid, uint_t semnum, int cmd, uintptr_t arg)
{
	ksemid_t		*sp;	/* ptr to semaphore header */
	struct sem		*p;	/* ptr to semaphore */
	unsigned int		i;	/* loop control */
	ushort_t		*vals, *vp;
	size_t			vsize = 0;
	int			error = 0;
	int			retval = 0;
	struct cred		*cr;
	kmutex_t		*lock;
	model_t			mdl = get_udatamodel();
	STRUCT_DECL(semid_ds, sid);
	struct semid_ds64	ds64;

	STRUCT_INIT(sid, mdl);
	cr = CRED();

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin((void *)arg, STRUCT_BUF(sid), STRUCT_SIZE(sid)))
			return (set_errno(EFAULT));
		break;

	case IPC_SET64:
		if (copyin((void *)arg, &ds64, sizeof (struct semid_ds64)))
			return (set_errno(EFAULT));
		break;

	case SETALL:
		/*
		 * Look up the set once just to learn sem_nsems, drop the
		 * lock, then allocate and copy in with no lock held.  The
		 * set may be deleted/reallocated in the window; the size
		 * is re-validated under the lock in the SETALL case below.
		 */
		if ((lock = ipc_lookup(sem_svc, semid,
		    (kipc_perm_t **)&sp)) == NULL)
			return (set_errno(EINVAL));
		vsize = sp->sem_nsems * sizeof (*vals);
		mutex_exit(lock);

		/* allocate space to hold all semaphore values */
		vals = kmem_alloc(vsize, KM_SLEEP);

		if (copyin((void *)arg, vals, vsize)) {
			kmem_free(vals, vsize);
			return (set_errno(EFAULT));
		}
		break;

	case IPC_RMID:
		if (error = ipc_rmid(sem_svc, semid, cr))
			return (set_errno(error));
		return (0);
	}

	/* Look up (or re-look-up, for SETALL) the set; lock is held. */
	if ((lock = ipc_lookup(sem_svc, semid, (kipc_perm_t **)&sp)) == NULL) {
		/* vsize != 0 only when SETALL already allocated vals. */
		if (vsize != 0)
			kmem_free(vals, vsize);
		return (set_errno(EINVAL));
	}

	switch (cmd) {

	/* Set ownership and permissions. */
	case IPC_SET:
		if (error = ipcperm_set(sem_svc, cr, &sp->sem_perm,
		    &STRUCT_BUF(sid)->sem_perm, mdl)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		sp->sem_ctime = gethrestime_sec();
		mutex_exit(lock);
		return (0);

	/* Get semaphore data structure. */
	case IPC_STAT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		ipcperm_stat(&STRUCT_BUF(sid)->sem_perm, &sp->sem_perm, mdl);
		STRUCT_FSETP(sid, sem_base, NULL);	/* kernel addr */
		STRUCT_FSET(sid, sem_nsems, sp->sem_nsems);
		STRUCT_FSET(sid, sem_otime, sp->sem_otime);
		STRUCT_FSET(sid, sem_ctime, sp->sem_ctime);
		STRUCT_FSET(sid, sem_binary, sp->sem_binary);
		mutex_exit(lock);

		if (copyout(STRUCT_BUF(sid), (void *)arg, STRUCT_SIZE(sid)))
			return (set_errno(EFAULT));
		return (0);

	case IPC_SET64:
		if (error = ipcperm_set64(sem_svc, cr, &sp->sem_perm,
		    &ds64.semx_perm)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		sp->sem_ctime = gethrestime_sec();
		mutex_exit(lock);
		return (0);

	case IPC_STAT64:
		/*
		 * NOTE(review): unlike IPC_STAT, no SEM_R permission
		 * check is made here — confirm this is the intended
		 * contract for the *64 interface.
		 */
		ipcperm_stat64(&ds64.semx_perm, &sp->sem_perm);
		ds64.semx_nsems = sp->sem_nsems;
		ds64.semx_otime = sp->sem_otime;
		ds64.semx_ctime = sp->sem_ctime;
		mutex_exit(lock);

		if (copyout(&ds64, (void *)arg, sizeof (struct semid_ds64)))
			return (set_errno(EFAULT));
		return (0);

	/* Get # of processes sleeping for greater semval. */
	case GETNCNT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semncnt;
		mutex_exit(lock);
		return (retval);

	/* Get pid of last process to operate on semaphore. */
	case GETPID:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].sempid;
		mutex_exit(lock);
		return (retval);

	/* Get semval of one semaphore. */
	case GETVAL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semval;
		mutex_exit(lock);
		return (retval);

	/* Get all semvals in set. */
	case GETALL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		/* allocate space to hold all semaphore values */
		vsize = sp->sem_nsems * sizeof (*vals);
		vals = vp = kmem_alloc(vsize, KM_SLEEP);

		/* Snapshot values under the lock, copy out after. */
		for (i = sp->sem_nsems, p = sp->sem_base; i--; p++, vp++)
			bcopy(&p->semval, vp, sizeof (p->semval));

		mutex_exit(lock);

		if (copyout((void *)vals, (void *)arg, vsize)) {
			kmem_free(vals, vsize);
			return (set_errno(EFAULT));
		}

		kmem_free(vals, vsize);
		return (0);

	/* Get # of processes sleeping for semval to become zero. */
	case GETZCNT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semzcnt;
		mutex_exit(lock);
		return (retval);

	/* Set semval of one semaphore. */
	case SETVAL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		/* Semaphore values are limited to ushort_t range. */
		if ((uint_t)arg > USHRT_MAX) {
			mutex_exit(lock);
			return (set_errno(ERANGE));
		}
		p = &sp->sem_base[semnum];
		/*
		 * Wake waiters the new value may satisfy: a nonzero value
		 * can unblock increment-waiters, zero can unblock
		 * wait-for-zero sleepers.
		 */
		if ((p->semval = (ushort_t)arg) != 0) {
			if (p->semncnt) {
				cv_broadcast(&p->semncnt_cv);
			}
		} else if (p->semzcnt) {
			cv_broadcast(&p->semzcnt_cv);
		}
		p->sempid = curproc->p_pid;
		sem_undo_clear(sp, (ushort_t)semnum, (ushort_t)semnum);
		mutex_exit(lock);
		return (0);

	/* Set semvals of all semaphores in set. */
	case SETALL:
		/*
		 * Check if semaphore set has been deleted and reallocated
		 * (the size learned during the unlocked copyin window may
		 * no longer match).
		 */
		if (sp->sem_nsems * sizeof (*vals) != vsize) {
			error = set_errno(EINVAL);
			goto seterr;
		}
		if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
			error = set_errno(error);
			goto seterr;
		}
		sem_undo_clear(sp, 0, sp->sem_nsems - 1);
		/*
		 * Store each value and wake any waiters it satisfies;
		 * sempid is updated in the loop's increment step.
		 */
		for (i = 0, p = sp->sem_base; i < sp->sem_nsems;
		    (p++)->sempid = curproc->p_pid) {
			if ((p->semval = vals[i++]) != 0) {
				if (p->semncnt) {
					cv_broadcast(&p->semncnt_cv);
				}
			} else if (p->semzcnt) {
				cv_broadcast(&p->semzcnt_cv);
			}
		}
seterr:
		mutex_exit(lock);
		kmem_free(vals, vsize);
		return (error);

	default:
		mutex_exit(lock);
		return (set_errno(EINVAL));
	}

	/* NOTREACHED */
}