/* ARGSUSED */
/*
 * Poll an iSCSI ksocket for incoming data.
 *
 * `timeout' is in milliseconds; it is converted to a struct timeval
 * (or struct timeval32 when the current data model requires the ILP32
 * layout) and installed as the socket's receive timeout.  A bounded
 * receive into a scratch buffer is then attempted.
 *
 * Returns the number of bytes received; 0 on setsockopt failure,
 * receive error, or timeout.
 */
static size_t
iscsi_net_poll(void *socket, clock_t timeout)
{
	int pflag;
	char msg[64];
	size_t recv = 0;
	ksocket_t ks = (ksocket_t)socket;

	if (get_udatamodel() == DATAMODEL_NONE ||
	    get_udatamodel() == DATAMODEL_NATIVE) {
		struct timeval tl;

		/* timeout is millisecond */
		tl.tv_sec = timeout / 1000;
		tl.tv_usec = (timeout % 1000) * 1000;

		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval), CRED()))
			return (0);
	} else {
		struct timeval32 tl;

		/* timeout is millisecond */
		tl.tv_sec = timeout / 1000;
		tl.tv_usec = (timeout % 1000) * 1000;

		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval32), CRED()))
			return (0);
	}

	pflag = MSG_ANY;
	bzero(msg, sizeof (msg));
	/*
	 * Bug fix: the original returned ksocket_recv()'s int error code
	 * (0 == success) as the size_t byte count, leaving `recv' unused
	 * and inverting the result for poll-style callers.  Return the
	 * received byte count instead, matching iscsi_net_recvmsg().
	 */
	(void) ksocket_recv(ks, msg, sizeof (msg), pflag, &recv, CRED());
	return (recv);
}
/*
 * Return the time accounting information for the calling LWP:
 * tvp[0] receives user CPU time, tvp[1] receives system (+trap) time.
 * Returns 0 on success, or sets EFAULT/EOVERFLOW errno on failure.
 */
int
lwp_info(timestruc_t *tvp)
{
	klwp_t *lwp = ttolwp(curthread);
	hrtime_t utime, stime;
	timestruc_t times[2];

	/* Raw microstate accounting values, scaled to real time. */
	utime = lwp->lwp_mstate.ms_acct[LMS_USER];
	stime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
	    lwp->lwp_mstate.ms_acct[LMS_TRAP];
	scalehrtime(&utime);
	scalehrtime(&stime);

	hrt2ts(utime, &times[0]);
	hrt2ts(stime, &times[1]);

	if (get_udatamodel() != DATAMODEL_NATIVE) {
		/* ILP32 caller: narrow to 32-bit timestrucs, checking range. */
		timestruc32_t times32[2];

		if (TIMESPEC_OVERFLOW(&times[0]) ||
		    TIMESPEC_OVERFLOW(&times[1]))
			return (set_errno(EOVERFLOW));	/* unlikely */

		TIMESPEC_TO_TIMESPEC32(&times32[0], &times[0]);
		TIMESPEC_TO_TIMESPEC32(&times32[1], &times[1]);

		if (copyout(times32, tvp, sizeof (times32)))
			return (set_errno(EFAULT));
		return (0);
	}

	if (copyout(times, tvp, sizeof (times)))
		return (set_errno(EFAULT));
	return (0);
}
/*
 * The utime() system call is no longer invoked from libc.
 * The utime() function has been implemented in libc using
 * a call to utimensat().  The kernel code for utime()
 * should be expunged as soon as there is no longer a need
 * to run Solaris 10 and prior versions of libc on the system.
 */
int
utime(char *fname, time_t *tptr)
{
	struct vattr va;
	time_t times[2];
	int attrflags;

	if (tptr == NULL) {
		/* No times supplied: stamp both with "now". */
		gethrestime(&va.va_atime);
		va.va_mtime = va.va_atime;
		attrflags = 0;
	} else {
		/* Copy the caller's {atime, mtime} pair, widening if ILP32. */
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyin(tptr, times, sizeof (times)))
				return (set_errno(EFAULT));
		} else {
			time32_t t32[2];

			if (copyin(tptr, &t32, sizeof (t32)))
				return (set_errno(EFAULT));
			times[0] = (time_t)t32[0];
			times[1] = (time_t)t32[1];
		}
		va.va_atime.tv_sec = times[0];
		va.va_atime.tv_nsec = 0;
		va.va_mtime.tv_sec = times[1];
		va.va_mtime.tv_nsec = 0;
		attrflags = ATTR_UTIME;
	}
	va.va_mask = AT_ATIME|AT_MTIME;
	return (cfutimesat(AT_FDCWD, fname, 1, &va, attrflags, FOLLOW));
}
/*
 * Read a mem_name_t from user-space and store it in the mem_name_t
 * pointed to by the mem_name argument.  Returns 0 or EFAULT.
 */
static int
mm_read_mem_name(intptr_t data, mem_name_t *mem_name)
{
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		/* Identical layout in kernel and user: copy directly. */
		if (copyin((void *)data, mem_name, sizeof (mem_name_t)))
			return (EFAULT);
		return (0);
	}

#ifdef	_SYSCALL32
	{
		/* ILP32 caller: copy the 32-bit form and widen each field. */
		mem_name32_t mn32;

		if (copyin((void *)data, &mn32, sizeof (mem_name32_t)))
			return (EFAULT);

		mem_name->m_addr = mn32.m_addr;
		mem_name->m_synd = mn32.m_synd;
		mem_name->m_type[0] = mn32.m_type[0];
		mem_name->m_type[1] = mn32.m_type[1];
		mem_name->m_name = (caddr_t)(uintptr_t)mn32.m_name;
		mem_name->m_namelen = (size_t)mn32.m_namelen;
		mem_name->m_sid = (caddr_t)(uintptr_t)mn32.m_sid;
		mem_name->m_sidlen = (size_t)mn32.m_sidlen;
	}
#endif	/* _SYSCALL32 */

	return (0);
}
/*
 * Copy in the relative timeout provided by the application and convert
 * it to an absolute timeout (relative value + current wall-clock time).
 * Returns 0, EFAULT on a bad user pointer, or EINVAL for an out-of-range
 * interval.
 */
static int
get_timeout(void *lx_timeout, timestruc_t *timeout)
{
	timestruc_t ts_now;

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(lx_timeout, timeout, sizeof (timestruc_t)))
			return (EFAULT);
	}
#ifdef _SYSCALL32_IMPL
	else {
		/* ILP32 caller: read the 32-bit timespec and widen it. */
		timestruc32_t ts32;

		if (copyin(lx_timeout, &ts32, sizeof (timestruc32_t)))
			return (EFAULT);
		timeout->tv_sec = (time_t)ts32.tv_sec;
		timeout->tv_nsec = ts32.tv_nsec;
	}
#endif
	gethrestime(&ts_now);
	/* Reject intervals with out-of-range fields. */
	if (itimerspecfix(timeout))
		return (EINVAL);
	/* Relative -> absolute: add "now" in place. */
	timespecadd(timeout, &ts_now);
	return (0);
}
/*
 * Helper function for semop - copies in the provided timespec and
 * computes the absolute future time after which we must return.
 *
 *	tsp	out: set to point at *ts on success.
 *	ts	caller-supplied storage for the converted timeout.
 *	now	current time, supplied by the caller.
 *	timeout	user-space relative timeout to copy in.
 *
 * Returns 0, EFAULT on a bad user pointer, or EINVAL if the interval
 * fails itimerspecfix() validation.
 */
static int
compute_timeout(timespec_t **tsp, timespec_t *ts, timespec_t *now,
    timespec_t *timeout)
{
	model_t datamodel = get_udatamodel();

	/* Copy in the relative timeout, widening from 32-bit if needed. */
	if (datamodel == DATAMODEL_NATIVE) {
		if (copyin(timeout, ts, sizeof (timespec_t)))
			return (EFAULT);
	} else {
		timespec32_t ts32;

		if (copyin(timeout, &ts32, sizeof (timespec32_t)))
			return (EFAULT);
		/* Macro expands to a brace block; no semicolon required. */
		TIMESPEC32_TO_TIMESPEC(ts, &ts32)
	}

	/* Validate the interval (nanosecond range etc.). */
	if (itimerspecfix(ts))
		return (EINVAL);
	/*
	 * Convert the timespec value into absolute time.
	 */
	timespecadd(ts, now);
	*tsp = ts;

	return (0);
}
/*
 * Fetch a 64-bit extended word from user memory on behalf of the FPU
 * simulator.  Returns ftt_alignment if the address is not 8-byte
 * aligned, ftt_fault if the user fetch fails (recording the faulting
 * address and access type in *pfpsd for the caller), or ftt_none on
 * success with *pvalue filled in.
 */
enum ftt_type
_fp_read_extword(
	const uint64_t *address,	/* FPU data address. */
	uint64_t *pvalue,		/* Place for extended word value. */
	fp_simd_type *pfpsd)		/* Pointer to fpu simulator data. */
{
	if (((uintptr_t)address & 0x7) != 0)
		return (ftt_alignment);	/* Must be extword-aligned. */

	if (get_udatamodel() == DATAMODEL_ILP32) {
		/*
		 * If this is a 32-bit program, chop the address accordingly.
		 * The intermediate uintptr_t casts prevent warnings under a
		 * certain compiler, and the temporary 32 bit storage is
		 * intended to force proper code generation and break up what
		 * would otherwise be a quadruple cast.
		 */
		caddr32_t address32 = (caddr32_t)(uintptr_t)address;
		address = (uint64_t *)(uintptr_t)address32;
	}

	if (fuword64(address, pvalue) == -1) {
		/* Record fault details so the caller can post the trap. */
		pfpsd->fp_trapaddr = (caddr_t)address;
		pfpsd->fp_traprw = S_READ;
		return (ftt_fault);
	}
	return (ftt_none);
}
/*
 * uadmin(2): administrative control - shutdown, reboot, dump, freeze,
 * remount, swap control, and fastboot dry-run checks.
 *
 *	cmd	major command (A_SHUTDOWN, A_REBOOT, A_SWAPCTL, ...).
 *	fcn	sub-function of cmd.
 *	mdep	command-dependent value; for several commands it is a
 *		user pointer to a boot-argument string.
 *
 * Returns 0 (or swapctl's result) on success; sets errno on failure.
 */
int
uadmin(int cmd, int fcn, uintptr_t mdep)
{
	int error = 0, rv = 0;
	size_t nbytes = 0;
	cred_t *credp = CRED();
	char *bootargs = NULL;
	int reset_status = 0;

	if (cmd == A_SHUTDOWN && fcn == AD_FASTREBOOT_DRYRUN) {
		/* Verify every driver can quiesce before a fast reboot. */
		ddi_walk_devs(ddi_root_node(), check_driver_quiesce,
		    &reset_status);
		if (reset_status != 0)
			return (EIO);
		else
			return (0);
	}

	/*
	 * The swapctl system call doesn't have its own entry point: it uses
	 * uadmin as a wrapper so we just call it directly from here.
	 */
	if (cmd == A_SWAPCTL) {
		if (get_udatamodel() == DATAMODEL_NATIVE)
			error = swapctl(fcn, (void *)mdep, &rv);
#if defined(_SYSCALL32_IMPL)
		else
			error = swapctl32(fcn, (void *)mdep, &rv);
#endif /* _SYSCALL32_IMPL */
		return (error ? set_errno(error) : rv);
	}

	/*
	 * Certain subcommands intepret a non-NULL mdep value as a pointer to
	 * a boot string.  We pull that in as bootargs, if applicable.
	 *
	 * Fix: mdep is a uintptr_t integer, so compare against 0 rather
	 * than the null-pointer constant NULL.
	 */
	if (mdep != 0 &&
	    (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_DUMP ||
	    cmd == A_FREEZE || cmd == A_CONFIG)) {
		bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
		if ((error = copyinstr((const char *)mdep, bootargs,
		    BOOTARGS_MAX, &nbytes)) != 0) {
			kmem_free(bootargs, BOOTARGS_MAX);
			return (set_errno(error));
		}
	}

	/*
	 * Invoke the appropriate kadmin() routine.
	 */
	if (getzoneid() != GLOBAL_ZONEID)
		error = zone_kadmin(cmd, fcn, bootargs, credp);
	else
		error = kadmin(cmd, fcn, bootargs, credp);

	if (bootargs != NULL)
		kmem_free(bootargs, BOOTARGS_MAX);
	return (error ? set_errno(error) : 0);
}
/*
 * Set the audit preselection mask of the process identified by the
 * user-supplied auditpinfo structure (ap_pid).  Installs a fresh cred
 * carrying the new mask and clears any pending PAD_SETMASK request,
 * since this explicit set supersedes it.
 *
 * Returns 0, EFAULT on a bad user pointer, ESRCH if the pid is not
 * found, or EINVAL if the target's cred has no modifiable audit info.
 */
static int
setpmask(caddr_t data)
{
	STRUCT_DECL(auditpinfo, apinfo);
	proc_t *proc;
	cred_t *newcred;
	auditinfo_addr_t *ainfo;
	struct p_audit_data *pad;

	model_t model;

	model = get_udatamodel();
	STRUCT_INIT(apinfo, model);

	if (copyin(data, STRUCT_BUF(apinfo), STRUCT_SIZE(apinfo)))
		return (EFAULT);

	mutex_enter(&pidlock);
	if ((proc = prfind(STRUCT_FGET(apinfo, ap_pid))) == NULL) {
		mutex_exit(&pidlock);
		return (ESRCH);
	}
	mutex_enter(&proc->p_lock);	/* so process doesn't go away */
	mutex_exit(&pidlock);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		mutex_exit(&proc->p_lock);
		crfree(newcred);
		return (EINVAL);
	}

	mutex_enter(&proc->p_crlock);
	crcopy_to(proc->p_cred, newcred);
	proc->p_cred = newcred;

	ainfo->ai_mask = STRUCT_FGET(apinfo, ap_mask);

	/*
	 * Unlock.  No need to broadcast changes via set_proc_pre_sys(),
	 * since t_pre_sys is ALWAYS on when audit is enabled... due to
	 * syscall auditing.
	 *
	 * NOTE(review): newcred's reference is dropped here while it is
	 * still installed as p_cred -- presumably crcopy_to()/cralloc()
	 * left an extra hold; verify against cred(9F) semantics.
	 */
	crfree(newcred);
	mutex_exit(&proc->p_crlock);

	/* Reset flag for any previous pending mask change; this supercedes */
	pad = P2A(proc);
	ASSERT(pad != NULL);
	mutex_enter(&(pad->pad_lock));
	pad->pad_flags &= ~PAD_SETMASK;
	mutex_exit(&(pad->pad_lock));

	mutex_exit(&proc->p_lock);

	return (0);
}
/*
 * Build the vattr time attributes for a utimensat-style call from an
 * optional user timespec pair.  tsptr == NULL means "set both times to
 * now"; otherwise each entry may be UTIME_NOW, UTIME_OMIT, or an
 * explicit timestamp.  *flags is set to ATTR_UTIME when explicit times
 * were supplied, 0 otherwise.  Returns 0, EFAULT, or EINVAL.
 */
static int
get_timespec_vattr(timespec_t *tsptr, struct vattr *vattr, int *flags)
{
	timespec_t req[2];
	timespec_t ts_now;
	uint_t vamask;
	int i;

	if (tsptr == NULL) {
		/* No times supplied: stamp both with "now". */
		gethrestime(&ts_now);
		vattr->va_atime = ts_now;
		vattr->va_mtime = ts_now;
		vattr->va_mask = AT_ATIME | AT_MTIME;
		*flags = 0;
		return (0);
	}

	/* Copy in the {atime, mtime} pair, widening from 32-bit if needed. */
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(tsptr, req, sizeof (req)))
			return (EFAULT);
	} else {
		timespec32_t req32[2];

		if (copyin(tsptr, req32, sizeof (req32)))
			return (EFAULT);
		TIMESPEC32_TO_TIMESPEC(&req[0], &req32[0]);
		TIMESPEC32_TO_TIMESPEC(&req[1], &req32[1]);
	}

	/* Sample the clock once if either timestamp asks for "now". */
	if (req[0].tv_nsec == UTIME_NOW || req[1].tv_nsec == UTIME_NOW)
		gethrestime(&ts_now);

	vamask = 0;
	for (i = 0; i < 2; i++) {
		if (req[i].tv_nsec == UTIME_OMIT) {
			/* Leave this timestamp untouched. */
			req[i].tv_nsec = 0;
			continue;
		}
		vamask |= (i == 0) ? AT_ATIME : AT_MTIME;
		if (req[i].tv_nsec == UTIME_NOW)
			req[i] = ts_now;
		else if (req[i].tv_nsec < 0 || req[i].tv_nsec >= NANOSEC)
			return (EINVAL);
	}

	vattr->va_atime = req[0];
	vattr->va_mtime = req[1];
	vattr->va_mask = vamask;
	*flags = ATTR_UTIME;
	return (0);
}
/*
 * Re-post a signal to the calling LWP with the given siginfo and set
 * the LWP's signal-hold mask, as if the signal were being re-delivered.
 * Used by libc to resend a deferred signal.
 *
 * Returns 0 on success; on failure sets errno to EINVAL (bad signal
 * number, unblockable signal, or mismatched siginfo), EFAULT (bad
 * user pointer), or EAGAIN (a signal is already pending delivery).
 */
int
sigresend(int sig, siginfo_t *siginfo, sigset_t *mask)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sigqueue_t *sqp = kmem_zalloc(sizeof (*sqp), KM_SLEEP);
	sigset_t set;
	k_sigset_t kset;
	int error;

	/* Reject bad signal numbers and signals that cannot be caught. */
	if (sig <= 0 || sig >= NSIG || sigismember(&cantmask, sig)) {
		error = EINVAL;
		goto bad;
	}

	if (siginfo == NULL) {
		sqp->sq_info.si_signo = sig;
		sqp->sq_info.si_code = SI_NOINFO;
	} else {
		/* Copy in siginfo in the caller's data model. */
		if (copyin_siginfo(get_udatamodel(), siginfo, &sqp->sq_info)) {
			error = EFAULT;
			goto bad;
		}
		/* The siginfo must describe the same signal being resent. */
		if (sqp->sq_info.si_signo != sig) {
			error = EINVAL;
			goto bad;
		}
	}

	if (copyin(mask, &set, sizeof (set))) {
		error = EFAULT;
		goto bad;
	}
	sigutok(&set, &kset);

	/*
	 * We don't need to acquire p->p_lock here;
	 * we are manipulating thread-private data.
	 */
	if (lwp->lwp_cursig || lwp->lwp_curinfo) {
		/* A signal is already queued for delivery; retry later. */
		t->t_sig_check = 1;
		error = EAGAIN;
		goto bad;
	}

	lwp->lwp_cursig = sig;
	lwp->lwp_curinfo = sqp;
	schedctl_finish_sigblock(t);
	t->t_hold = kset;
	t->t_sig_check = 1;
	return (0);
bad:
	kmem_free(sqp, sizeof (*sqp));
	return (set_errno(error));
}
/*
 * Set the audit state information for the current process.
 * Return EFAULT if copyout fails.
 *
 * Copies in an auditinfo structure (in the caller's data model),
 * installs a new cred on curproc, and sets the audit id, mask,
 * terminal id (IPv4 form only), and session id from it.
 * Returns 0, EPERM (policy check), EFAULT, or EINVAL.
 */
int
setaudit(caddr_t info_p)
{
	STRUCT_DECL(auditinfo, info);
	proc_t *p;
	cred_t	*newcred;
	model_t	model;
	auditinfo_addr_t *ainfo;

	if (secpolicy_audit_config(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		crfree(newcred);
		return (EINVAL);
	}

	/* grab p_crlock and switch to new cred */
	p = curproc;
	mutex_enter(&p->p_crlock);
	crcopy_to(p->p_cred, newcred);
	p->p_cred = newcred;

	/* Set audit mask, id, termid and session id as specified */
	ainfo->ai_auid = STRUCT_FGET(info, ai_auid);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		ainfo->ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.port));
	else
		ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.port);
#else
	ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.port);
#endif
	/* The legacy auditinfo form carries only an IPv4 machine address. */
	ainfo->ai_termid.at_type = AU_IPv4;
	ainfo->ai_termid.at_addr[0] = STRUCT_FGET(info, ai_termid.machine);
	ainfo->ai_asid = STRUCT_FGET(info, ai_asid);
	ainfo->ai_mask = STRUCT_FGET(info, ai_mask);

	/* unlock and broadcast the cred changes */
	mutex_exit(&p->p_crlock);
	crset(p, newcred);

	return (0);
}
/* ARGSUSED */
/*
 * Receive a message on an iSCSI ksocket.
 *
 * `timeout' is in seconds (unlike iscsi_net_poll, whose timeout is in
 * milliseconds); it is installed as the socket's receive timeout using
 * the struct timeval layout appropriate to the current data model.
 *
 * Returns the number of bytes received; 0 on setsockopt failure,
 * receive error, or timeout.
 */
static size_t
iscsi_net_recvmsg(void *socket, struct msghdr *msg, int timeout)
{
	int prflag = msg->msg_flags;
	ksocket_t ks = (ksocket_t)socket;
	size_t recv = 0;

	/* Set recv timeout */
	if (get_udatamodel() == DATAMODEL_NONE ||
	    get_udatamodel() == DATAMODEL_NATIVE) {
		struct timeval tl;

		tl.tv_sec = timeout;
		tl.tv_usec = 0;

		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval), CRED()))
			return (0);
	} else {
		struct timeval32 tl;

		tl.tv_sec = timeout;
		tl.tv_usec = 0;

		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval32), CRED()))
			return (0);
	}
	/*
	 * Receive the requested data.  Block until all
	 * data is received or timeout.
	 */
	ksocket_hold(ks);
	(void) ksocket_recvmsg(ks, msg, prflag, &recv, CRED());
	ksocket_rele(ks);
	DTRACE_PROBE1(ksocket_recvmsg, size_t, recv);
	return (recv);
}
/*
 * Copy the zone-wide (kernel context) audit state out to the user
 * buffer info_p, formatted for the caller's data model.  For ILP32
 * callers the 64-bit terminal-id port is compressed to a dev32_t.
 *
 * Returns 0, EOVERFLOW if the caller's buffer is too small or the
 * port does not fit in 32 bits, or EFAULT on copyout failure.
 */
static int
getkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t	*kctx = GET_KCTX_PZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, kctx->auk_info.ai_auid);
	STRUCT_FSET(info, ai_mask, kctx->auk_info.ai_namask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, kctx->auk_info.ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else {
		STRUCT_FSET(info, ai_termid.at_port,
		    kctx->auk_info.ai_termid.at_port);
	}
#else
	STRUCT_FSET(info, ai_termid.at_port,
	    kctx->auk_info.ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type,
	    kctx->auk_info.ai_termid.at_type);
	STRUCT_FSET(info, ai_termid.at_addr[0],
	    kctx->auk_info.ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1],
	    kctx->auk_info.ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2],
	    kctx->auk_info.ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3],
	    kctx->auk_info.ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, kctx->auk_info.ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 *
 * Extended (auditinfo_addr) form: supports IPv6 terminal ids.  The
 * result is formatted for the caller's data model; for ILP32 callers
 * the 64-bit terminal-id port is compressed to a dev32_t.
 * Returns 0, EPERM, EOVERFLOW (short buffer or port won't compress),
 * EINVAL (no audit info on the cred), or EFAULT.
 */
int
getaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	if (secpolicy_audit_getattr(CRED(), B_FALSE) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	ainfo = crgetauinfo(CRED());
	if (ainfo == NULL)
		return (EINVAL);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else
		STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type, ainfo->ai_termid.at_type);
	STRUCT_FSET(info, ai_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * mincore(2): report, one byte per page, whether each page of the
 * range [addr, addr + len) is resident in core.  Results are gathered
 * MC_CACHE pages at a time via as_incore() and copied out to vecp.
 *
 * Returns 0 on success; sets errno to EINVAL (misaligned address or
 * non-positive length in the caller's data model), ENOMEM (range not
 * valid or not fully mapped), or EFAULT (bad vecp).
 */
int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long	llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		/* Reinterpret len as the 32-bit caller's signed length. */
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of interval [addr : addr + len), recovering
	 * results internally and then copying them out to caller.  Subrange
	 * is based on the size of MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			/* Convert the byte count to a page count. */
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		/*
		 * NOTE(review): an as_incore() error after a partial
		 * copyout leaves vecp partially filled -- presumably
		 * acceptable since the call fails; confirm against spec.
		 */
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 *
 * Legacy (auditinfo) form: can only represent IPv4 terminal ids, so
 * EOVERFLOW is returned if the process has an IPv6 terminal id.
 */
static int
getaudit(caddr_t info_p)
{
	STRUCT_DECL(auditinfo, info);
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	/*
	 * NOTE(review): called here with one argument, while
	 * getaudit_addr() passes (CRED(), B_FALSE) -- verify the
	 * secpolicy_audit_getattr() signature in this tree.
	 */
	if (secpolicy_audit_getattr(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	ainfo = crgetauinfo(CRED());
	if (ainfo == NULL)
		return (EINVAL);

	/* trying to read a process with an IPv6 address? */
	if (ainfo->ai_termid.at_type == AU_IPv6)
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.port, dev);
	} else
		STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.machine, ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
/*
 * Copy the audit queue control parameters (high/low water marks,
 * buffer size, delay) out to the user buffer `data', formatted for
 * the caller's data model.  The queue lock is held only while the
 * values are sampled, so the snapshot is internally consistent.
 * Returns 0 or EFAULT.
 */
static int
getqctrl(caddr_t data)
{
	au_kcontext_t	*kctx = GET_KCTX_PZ;
	STRUCT_DECL(au_qctrl, qctrl);
	STRUCT_INIT(qctrl, get_udatamodel());

	mutex_enter(&(kctx->auk_queue.lock));
	STRUCT_FSET(qctrl, aq_hiwater, kctx->auk_queue.hiwater);
	STRUCT_FSET(qctrl, aq_lowater, kctx->auk_queue.lowater);
	STRUCT_FSET(qctrl, aq_bufsz, kctx->auk_queue.bufsz);
	STRUCT_FSET(qctrl, aq_delay, kctx->auk_queue.delay);
	mutex_exit(&(kctx->auk_queue.lock));

	if (copyout(STRUCT_BUF(qctrl), data, STRUCT_SIZE(qctrl)))
		return (EFAULT);

	return (0);
}
/* * Expunge this function when futimesat() and utimes() * are expunged from the kernel. */ static int get_timeval_vattr(struct timeval *tvptr, struct vattr *vattr, int *flags) { struct timeval tv[2]; if (tvptr != NULL) { if (get_udatamodel() == DATAMODEL_NATIVE) { if (copyin(tvptr, tv, sizeof (tv))) return (EFAULT); } else { struct timeval32 tv32[2]; if (copyin(tvptr, tv32, sizeof (tv32))) return (EFAULT); TIMEVAL32_TO_TIMEVAL(&tv[0], &tv32[0]); TIMEVAL32_TO_TIMEVAL(&tv[1], &tv32[1]); } if (tv[0].tv_usec < 0 || tv[0].tv_usec >= MICROSEC || tv[1].tv_usec < 0 || tv[1].tv_usec >= MICROSEC) return (EINVAL); vattr->va_atime.tv_sec = tv[0].tv_sec; vattr->va_atime.tv_nsec = tv[0].tv_usec * 1000; vattr->va_mtime.tv_sec = tv[1].tv_sec; vattr->va_mtime.tv_nsec = tv[1].tv_usec * 1000; *flags = ATTR_UTIME; } else { gethrestime(&vattr->va_atime); vattr->va_mtime = vattr->va_atime; *flags = 0; } vattr->va_mask = AT_ATIME | AT_MTIME; return (0); }
/*
 * PC Sampling
 *
 * pcsample(2): arm PC-sampling profiling for the calling process.
 * `buf' is the user sample buffer and `nsamples' its capacity; the
 * actual sample collection happens in the post-syscall handler
 * activated below.  Returns the number of samples accumulated since
 * the previous call (and resets that count), or sets EINVAL errno for
 * a negative count or one that won't fit a 32-bit caller.
 */
long
pcsample(void *buf, long nsamples)
{
	struct proc *p = ttoproc(curthread);
	long count = 0;

	if (nsamples < 0 ||
	    ((get_udatamodel() != DATAMODEL_NATIVE) &&
	    (nsamples > INT32_MAX)))
		return (set_errno(EINVAL));

	mutex_enter(&p->p_pflock);
	p->p_prof.pr_base = buf;
	p->p_prof.pr_size = nsamples;
	p->p_prof.pr_scale = 1;	/* non-zero scale enables sampling */
	count = p->p_prof.pr_samples;
	p->p_prof.pr_samples = 0;
	mutex_exit(&p->p_pflock);

	mutex_enter(&p->p_lock);
	set_proc_post_sys(p);	/* activate post_syscall profiling code */
	mutex_exit(&p->p_lock);

	return (count);
}
/*ARGSUSED*/
/*
 * IPMI character device ioctl entry point.
 *
 * Handles send/receive of BMC messages plus per-open address/LUN get
 * and set.  32-bit callers use the *_32 command variants; their
 * structures are converted to the native forms up front and the
 * command code is remapped, with orig_cmd remembering the original
 * 32-bit receive variant so the reply can be copied back out in the
 * ILP32 layout.
 *
 * NOTE(review): `sc' (the softc used by IPMI_LOCK/ipmi_enqueue_request)
 * is not declared in this function -- presumably a file-scope global
 * or macro in this driver; verify.
 */
static int
ipmi_ioctl(dev_t dv, int cmd, intptr_t data, int flags, cred_t *cr,
    int *rvalp)
{
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req req;
	struct ipmi_recv recv;
	struct ipmi_recv32 recv32;
	struct ipmi_addr addr;
	int error, len;
	model_t model;
	int orig_cmd = 0;
	uchar_t t_lun;

	if (secpolicy_sys_config(cr, B_FALSE) != 0)
		return (EPERM);

	if ((dev = lookup_ipmidev_by_dev(dv)) == NULL)
		return (ENODEV);

	model = get_udatamodel();
	if (model == DATAMODEL_NATIVE) {
		switch (cmd) {
		case IPMICTL_SEND_COMMAND:
			if (copyin((void *)data, &req, sizeof (req)))
				return (EFAULT);
			break;
		case IPMICTL_RECEIVE_MSG_TRUNC:
		case IPMICTL_RECEIVE_MSG:
			if (copyin((void *)data, &recv, sizeof (recv)))
				return (EFAULT);
			break;
		}
	} else {
		/* Convert 32-bit structures to native. */
		struct ipmi_req32 req32;

		switch (cmd) {
		case IPMICTL_SEND_COMMAND_32:
			if (copyin((void *)data, &req32, sizeof (req32)))
				return (EFAULT);

			req.addr = PTRIN(req32.addr);
			req.addr_len = req32.addr_len;
			req.msgid = req32.msgid;
			req.msg.netfn = req32.msg.netfn;
			req.msg.cmd = req32.msg.cmd;
			req.msg.data_len = req32.msg.data_len;
			req.msg.data = PTRIN(req32.msg.data);
			cmd = IPMICTL_SEND_COMMAND;
			break;
		case IPMICTL_RECEIVE_MSG_TRUNC_32:
		case IPMICTL_RECEIVE_MSG_32:
			if (copyin((void *)data, &recv32, sizeof (recv32)))
				return (EFAULT);

			recv.addr = PTRIN(recv32.addr);
			recv.addr_len = recv32.addr_len;
			recv.msg.data_len = recv32.msg.data_len;
			recv.msg.data = PTRIN(recv32.msg.data);
			/* Remember the 32-bit variant for copyout later. */
			orig_cmd = cmd;
			cmd = (cmd == IPMICTL_RECEIVE_MSG_TRUNC_32) ?
			    IPMICTL_RECEIVE_MSG_TRUNC : IPMICTL_RECEIVE_MSG;
			break;
		}
	}

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
		IPMI_LOCK(sc);
		/* clear out old stuff in queue of stuff done */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))
		    != NULL) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		/* Check that we didn't get a ridiculous length */
		if (req.msg.data_len > IPMI_MAX_RX)
			return (EINVAL);

		/*
		 * NOTE(review): ipmi_alloc_request() result is used
		 * without a NULL check -- presumably it sleeps and
		 * cannot fail; confirm its allocation policy.
		 */
		kreq = ipmi_alloc_request(dev, req.msgid,
		    IPMI_ADDR(req.msg.netfn, 0), req.msg.cmd,
		    req.msg.data_len, IPMI_MAX_RX);
		/* This struct is the same for 32/64 */
		if (req.msg.data_len > 0 &&
		    copyin(req.msg.data, kreq->ir_request, req.msg.data_len)) {
			ipmi_free_request(kreq);
			return (EFAULT);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;

	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		/* This struct is the same for 32/64 */
		if (copyin(recv.addr, &addr, sizeof (addr)))
			return (EFAULT);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			/* No completed request is waiting. */
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		recv.recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv.msgid = kreq->ir_msgid;
		recv.msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv.msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			/* Request completed with an error: consume it. */
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		/* +1 for the completion-code byte prepended to the reply. */
		len = kreq->ir_replylen + 1;
		if (recv.msg.data_len < len && cmd == IPMICTL_RECEIVE_MSG) {
			/* Non-TRUNC variant refuses to truncate the reply. */
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);

		len = min(recv.msg.data_len, len);
		recv.msg.data_len = (unsigned short)len;

		if (orig_cmd == IPMICTL_RECEIVE_MSG_TRUNC_32 ||
		    orig_cmd == IPMICTL_RECEIVE_MSG_32) {
			/* Update changed fields in 32-bit structure. */
			recv32.recv_type = recv.recv_type;
			recv32.msgid = (int32_t)recv.msgid;
			recv32.msg.netfn = recv.msg.netfn;
			recv32.msg.cmd = recv.msg.cmd;
			recv32.msg.data_len = recv.msg.data_len;

			error = copyout(&recv32, (void *)data,
			    sizeof (recv32));
		} else {
			error = copyout(&recv, (void *)data, sizeof (recv));
		}
		/* This struct is the same for 32/64 */
		if (error == 0)
			error = copyout(&addr, recv.addr, sizeof (addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv.msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv.msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (EFAULT);
		break;

	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &dev->ipmi_address,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_address, (void *)data,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &t_lun, sizeof (t_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		/* Only the low two bits of the LUN are meaningful. */
		dev->ipmi_lun = t_lun & 0x3;
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_lun, (void *)data,
		    sizeof (dev->ipmi_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_SET_GETS_EVENTS_CMD:
		/* Accepted but not implemented. */
		break;

	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EINVAL);

	default:
		return (EINVAL);
	}
	return (0);
}
/*
 * System call to create an lwp.
 *
 * Notes on the LWP_DETACHED and LWP_DAEMON flags:
 *
 * A detached lwp (LWP_DETACHED) cannot be the specific target of
 * lwp_wait() (it is not joinable), but lwp_wait(0, ...) is required
 * to sleep until all non-daemon detached lwps have terminated before
 * returning EDEADLK because a detached lwp might create a non-detached lwp
 * that could then be returned by lwp_wait(0, ...).  See also lwp_detach().
 *
 * A daemon lwp (LWP_DAEMON) is a detached lwp that has the additional
 * property that it does not affect the termination condition of the
 * process:  The last non-daemon lwp to call lwp_exit() causes the process
 * to exit and lwp_wait(0, ...) does not sleep waiting for daemon lwps
 * to terminate.  See the block comment before lwp_wait().
 *
 * On success returns the new lwp's id; on failure sets errno to
 * EINVAL (bad flags), ENOTSUP (agent lwp), EFAULT (bad ucontext or
 * new_lwp pointer), or EAGAIN (lwp_create failure).
 */
int
syslwp_create(ucontext_t *ucp, int flags, id_t *new_lwp)
{
	klwp_t *lwp;
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	ucontext_t uc;
#ifdef _SYSCALL32_IMPL
	ucontext32_t uc32;
#endif /* _SYSCALL32_IMPL */
	k_sigset_t sigmask;
	int	tid;
	model_t model = get_udatamodel();
	uintptr_t thrptr = 0;

	if (flags & ~(LWP_DAEMON|LWP_DETACHED|LWP_SUSPENDED))
		return (set_errno(EINVAL));

	/*
	 * lwp_create() is disallowed for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	/* Copy in the initial context in the caller's data model. */
	if (model == DATAMODEL_NATIVE) {
		if (copyin(ucp, &uc, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		sigutok(&uc.uc_sigmask, &sigmask);
#if defined(__i386)
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc.uc_mcontext.gregs[ESP];
#endif
	}
#ifdef _SYSCALL32_IMPL
	else {
		if (copyin(ucp, &uc32, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		sigutok(&uc32.uc_sigmask, &sigmask);
#if defined(__sparc)
		ucontext_32ton(&uc32, &uc, NULL, NULL);
#else	/* __amd64 */
		ucontext_32ton(&uc32, &uc);
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc32.uc_mcontext.gregs[ESP];
#endif
	}
#endif /* _SYSCALL32_IMPL */

	(void) save_syscall_args();	/* save args for tracing first */

	/* Hold the pool barrier across lwp creation. */
	mutex_enter(&curproc->p_lock);
	pool_barrier_enter();
	mutex_exit(&curproc->p_lock);
	lwp = lwp_create(lwp_rtt, NULL, NULL, curproc, TS_STOPPED,
	    curthread->t_pri, &sigmask, curthread->t_cid, 0);
	mutex_enter(&curproc->p_lock);
	pool_barrier_exit();
	mutex_exit(&curproc->p_lock);
	if (lwp == NULL)
		return (set_errno(EAGAIN));

	lwp_load(lwp, uc.uc_mcontext.gregs, thrptr);

	t = lwptot(lwp);
	/*
	 * Copy the new lwp's lwpid into the caller's specified buffer.
	 */
	if (new_lwp && copyout(&t->t_tid, new_lwp, sizeof (id_t))) {
		/*
		 * caller's buffer is not writable, return
		 * EFAULT, and terminate new lwp.
		 */
		mutex_enter(&p->p_lock);
		t->t_proc_flag |= TP_EXITLWP;
		t->t_sig_check = 1;
		t->t_sysnum = 0;
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
		mutex_exit(&p->p_lock);
		return (set_errno(EFAULT));
	}

	/*
	 * clone callers context, if any.  must be invoked
	 * while -not- holding p_lock.
	 */
	if (curthread->t_ctx)
		lwp_createctx(curthread, t);

	/*
	 * copy current contract templates
	 */
	lwp_ctmpl_copy(lwp, ttolwp(curthread));

	mutex_enter(&p->p_lock);
	/*
	 * Copy the syscall arguments to the new lwp's arg area
	 * for the benefit of debuggers.
	 */
	t->t_sysnum = SYS_lwp_create;
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_arg[0] = (long)ucp;
	lwp->lwp_arg[1] = (long)flags;
	lwp->lwp_arg[2] = (long)new_lwp;
	lwp->lwp_argsaved = 1;

	if (!(flags & (LWP_DETACHED|LWP_DAEMON)))
		t->t_proc_flag |= TP_TWAIT;
	if (flags & LWP_DAEMON) {
		t->t_proc_flag |= TP_DAEMON;
		p->p_lwpdaemon++;
	}

	tid = (int)t->t_tid;	/* for /proc debuggers */

	/*
	 * We now set the newly-created lwp running.
	 * If it is being created as LWP_SUSPENDED, we leave its
	 * TP_HOLDLWP flag set so it will stop in system call exit.
	 */
	if (!(flags & LWP_SUSPENDED))
		t->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(t);
	mutex_exit(&p->p_lock);
	return (tid);
}
/*
 * Set the audit preselection mask of every accessible process whose
 * audit session id matches the one in the user-supplied auditinfo.
 * The change is not applied directly: it is recorded in each matching
 * process's audit data (PAD_SETMASK + pad_newmask) and picked up on
 * that process's next system call.
 *
 * Returns 0, EINVAL (not permitted in this zone), or EFAULT.
 */
static int
setsmask(caddr_t data)
{
	STRUCT_DECL(auditinfo, user_info);
	struct proc *p;
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	/* setsmask not applicable in non-global zones without perzone policy */
	if (!(audit_policy & AUDIT_PERZONE) && (!INGLOBALZONE(curproc)))
		return (EINVAL);

	model = get_udatamodel();
	STRUCT_INIT(user_info, model);

	if (copyin(data, STRUCT_BUF(user_info), STRUCT_SIZE(user_info)))
		return (EFAULT);

	mutex_enter(&pidlock);	/* lock the process queue against updates */
	for (p = practive; p != NULL; p = p->p_next) {
		cred_t	*cr;

		/* if in non-global zone only modify processes in same zone */
		if (!HASZONEACCESS(curproc, p->p_zone->zone_id))
			continue;

		mutex_enter(&p->p_lock);	/* so process doesn't go away */

		/* skip system processes and ones being created or going away */
		if (p->p_stat == SIDL || p->p_stat == SZOMB ||
		    (p->p_flag & (SSYS | SEXITING | SEXITLWPS))) {
			mutex_exit(&p->p_lock);
			continue;
		}

		/* Take a hold on the cred so it can be inspected unlocked. */
		mutex_enter(&p->p_crlock);
		crhold(cr = p->p_cred);
		mutex_exit(&p->p_crlock);
		ainfo = crgetauinfo(cr);
		if (ainfo == NULL) {
			mutex_exit(&p->p_lock);
			crfree(cr);
			continue;
		}

		if (ainfo->ai_asid == STRUCT_FGET(user_info, ai_asid)) {
			au_mask_t	mask;
			int		err;

			/*
			 * Here's a process which matches the specified asid.
			 * If its mask doesn't already match the new mask,
			 * save the new mask in the pad, to be picked up
			 * next syscall.
			 */
			mask = STRUCT_FGET(user_info, ai_mask);
			err = bcmp(&mask, &ainfo->ai_mask, sizeof (au_mask_t));
			crfree(cr);
			if (err != 0) {
				struct p_audit_data *pad = P2A(p);
				ASSERT(pad != NULL);

				mutex_enter(&(pad->pad_lock));
				pad->pad_flags |= PAD_SETMASK;
				pad->pad_newmask = mask;
				mutex_exit(&(pad->pad_lock));

				/*
				 * No need to call set_proc_pre_sys(), since
				 * t_pre_sys is ALWAYS on when audit is
				 * enabled...due to syscall auditing.
				 */
			}
		} else {
			crfree(cr);
		}
		mutex_exit(&p->p_lock);
	}
	mutex_exit(&pidlock);

	return (0);
}
/*
 * setqctrl() - set the kernel audit queue control parameters (high/low
 * water marks, buffer size, and write delay) for the current zone's
 * audit context.
 *
 * All four values are validated against the AQ_* limits before any of
 * them is applied, and the update itself happens atomically under the
 * queue lock so readers never observe a half-updated configuration.
 *
 * Returns 0 on success, EINVAL for a bad value or a non-global zone
 * without the perzone policy, or EFAULT if the copyin fails.
 */
static int
setqctrl(caddr_t data)
{
	au_kcontext_t	*kctx;
	struct au_qctrl	q;
	STRUCT_DECL(au_qctrl, qctrl);

	STRUCT_INIT(qctrl, get_udatamodel());

	/* Not applicable in non-global zones without perzone policy. */
	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);
	kctx = GET_KCTX_NGZ;

	if (copyin(data, STRUCT_BUF(qctrl), STRUCT_SIZE(qctrl)))
		return (EFAULT);

	q.aq_hiwater = (size_t)STRUCT_FGET(qctrl, aq_hiwater);
	q.aq_lowater = (size_t)STRUCT_FGET(qctrl, aq_lowater);
	q.aq_bufsz = (size_t)STRUCT_FGET(qctrl, aq_bufsz);
	q.aq_delay = (clock_t)STRUCT_FGET(qctrl, aq_delay);

	/* Enforce sane values; reject anything out of range. */
	if (q.aq_hiwater <= q.aq_lowater ||
	    q.aq_hiwater < AQ_LOWATER ||
	    q.aq_hiwater > AQ_MAXHIGH ||
	    q.aq_bufsz < AQ_BUFSZ ||
	    q.aq_bufsz > AQ_MAXBUFSZ ||
	    q.aq_delay == 0 ||
	    q.aq_delay > AQ_MAXDELAY)
		return (EINVAL);

	/* Update everything at once so things are consistent. */
	mutex_enter(&(kctx->auk_queue.lock));
	kctx->auk_queue.hiwater = q.aq_hiwater;
	kctx->auk_queue.lowater = q.aq_lowater;
	kctx->auk_queue.bufsz = q.aq_bufsz;
	kctx->auk_queue.delay = q.aq_delay;

	/*
	 * The new thresholds may unblock waiters: wake blocked readers
	 * if the queue is now above the low-water mark, and blocked
	 * writers if it is now below the high-water mark.
	 */
	if (kctx->auk_queue.rd_block &&
	    kctx->auk_queue.cnt > kctx->auk_queue.lowater)
		cv_broadcast(&(kctx->auk_queue.read_cv));

	if (kctx->auk_queue.wt_block &&
	    kctx->auk_queue.cnt < kctx->auk_queue.hiwater)
		cv_broadcast(&(kctx->auk_queue.write_cv));

	mutex_exit(&(kctx->auk_queue.lock));

	return (0);
}
/*
 * setkaudit() - set the audit host context (audit mask, terminal id, and
 * session id) in the zone's kernel audit context.
 *
 * the host address for AUDIT_PERZONE == 0 is that of the global
 * zone and for local zones it is of the current zone.
 *
 * Returns 0 on success, EINVAL for a bad terminal type or a non-global
 * zone without the perzone policy, EOVERFLOW if the caller's buffer is
 * too small, or EFAULT if the copyin fails.
 */
static int
setkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t	*kctx;

	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);

	kctx = GET_KCTX_NGZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	if ((STRUCT_FGET(info, ai_termid.at_type) != AU_IPv4) &&
	    (STRUCT_FGET(info, ai_termid.at_type) != AU_IPv6))
		return (EINVAL);

	/* Set audit mask, termid and session id as specified */
	kctx->auk_info.ai_auid = STRUCT_FGET(info, ai_auid);
	kctx->auk_info.ai_namask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		kctx->auk_info.ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		kctx->auk_info.ai_termid.at_port =
		    STRUCT_FGET(info, ai_termid.at_port);
#else
	kctx->auk_info.ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	kctx->auk_info.ai_termid.at_type = STRUCT_FGET(info, ai_termid.at_type);
	bzero(&kctx->auk_info.ai_termid.at_addr[0],
	    sizeof (kctx->auk_info.ai_termid.at_addr));
	kctx->auk_info.ai_termid.at_addr[0] =
	    STRUCT_FGET(info, ai_termid.at_addr[0]);
	kctx->auk_info.ai_termid.at_addr[1] =
	    STRUCT_FGET(info, ai_termid.at_addr[1]);
	kctx->auk_info.ai_termid.at_addr[2] =
	    STRUCT_FGET(info, ai_termid.at_addr[2]);
	kctx->auk_info.ai_termid.at_addr[3] =
	    STRUCT_FGET(info, ai_termid.at_addr[3]);
	kctx->auk_info.ai_asid = STRUCT_FGET(info, ai_asid);

	/*
	 * If the supplied IPv6 address is actually a v4-mapped address
	 * (::ffff:a.b.c.d), collapse it to a plain IPv4 terminal id: the
	 * v4 address lives in the last 32-bit word of the mapped form.
	 */
	if (kctx->auk_info.ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(
	    ((in6_addr_t *)kctx->auk_info.ai_termid.at_addr))) {
		kctx->auk_info.ai_termid.at_type = AU_IPv4;
		kctx->auk_info.ai_termid.at_addr[0] =
		    kctx->auk_info.ai_termid.at_addr[3];
		kctx->auk_info.ai_termid.at_addr[1] = 0;
		kctx->auk_info.ai_termid.at_addr[2] = 0;
		kctx->auk_info.ai_termid.at_addr[3] = 0;
	}

	/*
	 * Record whether a usable host address was supplied; the
	 * unspecified/ANY address does not count as valid.
	 */
	if (kctx->auk_info.ai_termid.at_type == AU_IPv6)
		kctx->auk_hostaddr_valid = IN6_IS_ADDR_UNSPECIFIED(
		    (in6_addr_t *)kctx->auk_info.ai_termid.at_addr) ? 0 : 1;
	else
		kctx->auk_hostaddr_valid =
		    (kctx->auk_info.ai_termid.at_addr[0] ==
		    htonl(INADDR_ANY)) ? 0 : 1;

	return (0);
}
/*
 * semctl - Semctl system call.
 *
 * Dispatches on cmd: IPC_SET/IPC_SET64 (change permissions), IPC_STAT/
 * IPC_STAT64 (report queue state), IPC_RMID (remove the set), GETVAL/
 * GETPID/GETNCNT/GETZCNT (per-semaphore queries), GETALL/SETALL (bulk
 * value transfer), and SETVAL (set one value).
 *
 * Copyins (and, for SETALL, the value-buffer allocation) are performed
 * before taking the set's lock; copyouts happen after it is dropped.
 * On error, returns -1 with errno set via set_errno().
 */
static int
semctl(int semid, uint_t semnum, int cmd, uintptr_t arg)
{
	ksemid_t		*sp;	/* ptr to semaphore header */
	struct sem		*p;	/* ptr to semaphore */
	unsigned int		i;	/* loop control */
	ushort_t		*vals, *vp;
	size_t			vsize = 0;
	int			error = 0;
	int			retval = 0;
	struct cred		*cr;
	kmutex_t		*lock;
	model_t			mdl = get_udatamodel();
	STRUCT_DECL(semid_ds, sid);
	struct semid_ds64	ds64;

	STRUCT_INIT(sid, mdl);
	cr = CRED();

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin((void *)arg, STRUCT_BUF(sid), STRUCT_SIZE(sid)))
			return (set_errno(EFAULT));
		break;

	case IPC_SET64:
		if (copyin((void *)arg, &ds64, sizeof (struct semid_ds64)))
			return (set_errno(EFAULT));
		break;

	case SETALL:
		/*
		 * Look up the set only to size the value buffer, then drop
		 * the lock: kmem_alloc(KM_SLEEP) and copyin may block.  The
		 * set may be deleted/reallocated meanwhile; the SETALL case
		 * below re-checks vsize after the second lookup.
		 */
		if ((lock = ipc_lookup(sem_svc, semid,
		    (kipc_perm_t **)&sp)) == NULL)
			return (set_errno(EINVAL));
		vsize = sp->sem_nsems * sizeof (*vals);
		mutex_exit(lock);

		/* allocate space to hold all semaphore values */
		vals = kmem_alloc(vsize, KM_SLEEP);

		if (copyin((void *)arg, vals, vsize)) {
			kmem_free(vals, vsize);
			return (set_errno(EFAULT));
		}
		break;

	case IPC_RMID:
		if (error = ipc_rmid(sem_svc, semid, cr))
			return (set_errno(error));
		return (0);
	}

	/* Re-acquire (or acquire) the set; free the SETALL buffer on failure. */
	if ((lock = ipc_lookup(sem_svc, semid, (kipc_perm_t **)&sp)) == NULL) {
		if (vsize != 0)
			kmem_free(vals, vsize);
		return (set_errno(EINVAL));
	}

	switch (cmd) {
	/* Set ownership and permissions. */
	case IPC_SET:
		if (error = ipcperm_set(sem_svc, cr, &sp->sem_perm,
		    &STRUCT_BUF(sid)->sem_perm, mdl)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		sp->sem_ctime = gethrestime_sec();
		mutex_exit(lock);
		return (0);

	/* Get semaphore data structure. */
	case IPC_STAT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}

		ipcperm_stat(&STRUCT_BUF(sid)->sem_perm, &sp->sem_perm, mdl);
		STRUCT_FSETP(sid, sem_base, NULL);	/* kernel addr */
		STRUCT_FSET(sid, sem_nsems, sp->sem_nsems);
		STRUCT_FSET(sid, sem_otime, sp->sem_otime);
		STRUCT_FSET(sid, sem_ctime, sp->sem_ctime);
		STRUCT_FSET(sid, sem_binary, sp->sem_binary);
		mutex_exit(lock);

		/* copyout after the lock is dropped */
		if (copyout(STRUCT_BUF(sid), (void *)arg, STRUCT_SIZE(sid)))
			return (set_errno(EFAULT));
		return (0);

	case IPC_SET64:
		if (error = ipcperm_set64(sem_svc, cr, &sp->sem_perm,
		    &ds64.semx_perm)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		sp->sem_ctime = gethrestime_sec();
		mutex_exit(lock);
		return (0);

	/*
	 * NOTE(review): unlike IPC_STAT, IPC_STAT64 performs no
	 * ipcperm_access() check before reporting — presumably intentional
	 * for the private 64-bit interface; confirm against callers.
	 */
	case IPC_STAT64:
		ipcperm_stat64(&ds64.semx_perm, &sp->sem_perm);
		ds64.semx_nsems = sp->sem_nsems;
		ds64.semx_otime = sp->sem_otime;
		ds64.semx_ctime = sp->sem_ctime;

		mutex_exit(lock);

		if (copyout(&ds64, (void *)arg, sizeof (struct semid_ds64)))
			return (set_errno(EFAULT));
		return (0);

	/* Get # of processes sleeping for greater semval. */
	case GETNCNT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semncnt;
		mutex_exit(lock);
		return (retval);

	/* Get pid of last process to operate on semaphore. */
	case GETPID:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].sempid;
		mutex_exit(lock);
		return (retval);

	/* Get semval of one semaphore. */
	case GETVAL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semval;
		mutex_exit(lock);
		return (retval);

	/* Get all semvals in set. */
	case GETALL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}

		/* allocate space to hold all semaphore values */
		vsize = sp->sem_nsems * sizeof (*vals);
		vals = vp = kmem_alloc(vsize, KM_SLEEP);

		for (i = sp->sem_nsems, p = sp->sem_base; i--; p++, vp++)
			bcopy(&p->semval, vp, sizeof (p->semval));

		mutex_exit(lock);

		if (copyout((void *)vals, (void *)arg, vsize)) {
			kmem_free(vals, vsize);
			return (set_errno(EFAULT));
		}

		kmem_free(vals, vsize);
		return (0);

	/* Get # of processes sleeping for semval to become zero. */
	case GETZCNT:
		if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		retval = sp->sem_base[semnum].semzcnt;
		mutex_exit(lock);
		return (retval);

	/* Set semval of one semaphore. */
	case SETVAL:
		if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		if (semnum >= sp->sem_nsems) {
			mutex_exit(lock);
			return (set_errno(EINVAL));
		}
		if ((uint_t)arg > USHRT_MAX) {
			mutex_exit(lock);
			return (set_errno(ERANGE));
		}
		p = &sp->sem_base[semnum];
		/*
		 * Wake waiters affected by the new value: a nonzero value
		 * may satisfy "wait for increase" sleepers, a zero value
		 * may satisfy "wait for zero" sleepers.
		 */
		if ((p->semval = (ushort_t)arg) != 0) {
			if (p->semncnt) {
				cv_broadcast(&p->semncnt_cv);
			}
		} else if (p->semzcnt) {
			cv_broadcast(&p->semzcnt_cv);
		}
		p->sempid = curproc->p_pid;
		sem_undo_clear(sp, (ushort_t)semnum, (ushort_t)semnum);
		mutex_exit(lock);
		return (0);

	/* Set semvals of all semaphores in set. */
	case SETALL:
		/* Check if semaphore set has been deleted and reallocated. */
		if (sp->sem_nsems * sizeof (*vals) != vsize) {
			error = set_errno(EINVAL);
			goto seterr;
		}
		if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
			error = set_errno(error);
			goto seterr;
		}
		sem_undo_clear(sp, 0, sp->sem_nsems - 1);
		/* same wakeup rules as SETVAL, applied per semaphore */
		for (i = 0, p = sp->sem_base; i < sp->sem_nsems;
		    (p++)->sempid = curproc->p_pid) {
			if ((p->semval = vals[i++]) != 0) {
				if (p->semncnt) {
					cv_broadcast(&p->semncnt_cv);
				}
			} else if (p->semzcnt) {
				cv_broadcast(&p->semzcnt_cv);
			}
		}
seterr:
		mutex_exit(lock);
		kmem_free(vals, vsize);
		return (error);

	default:
		mutex_exit(lock);
		return (set_errno(EINVAL));
	}

	/* NOTREACHED */
}
/*
 * Set the audit state information for the calling process.
 *
 * Copies an auditinfo_addr from userland into a fresh cred and installs
 * that cred on curproc (audit id, mask, terminal id, session id).  A
 * v4-mapped IPv6 terminal address is collapsed to plain IPv4.
 *
 * Return EFAULT if copyin fails; EPERM without audit-config privilege;
 * EOVERFLOW if the caller's buffer is too small; EINVAL for a bad
 * terminal type or a cred with no audit info.
 */
int
setaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	proc_t *p;
	cred_t	*newcred;
	model_t	model;
	int	i;
	int	type;
	auditinfo_addr_t	*ainfo;

	if (secpolicy_audit_config(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	type = STRUCT_FGET(info, ai_termid.at_type);
	if ((type != AU_IPv4) && (type != AU_IPv6))
		return (EINVAL);

	/* Build a new cred with modifiable audit info before locking. */
	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		crfree(newcred);
		return (EINVAL);
	}

	/* grab p_crlock and switch to new cred */
	p = curproc;
	mutex_enter(&p->p_crlock);
	crcopy_to(p->p_cred, newcred);
	p->p_cred = newcred;

	/* Set audit mask, id, termid and session id as specified */
	ainfo->ai_auid = STRUCT_FGET(info, ai_auid);
	ainfo->ai_mask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		ainfo->ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		ainfo->ai_termid.at_port =
		    STRUCT_FGET(info, ai_termid.at_port);
#else
	ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	ainfo->ai_termid.at_type = type;
	bzero(&ainfo->ai_termid.at_addr[0], sizeof (ainfo->ai_termid.at_addr));
	/* copy 1 word for IPv4 (type 4), 4 words for IPv6 (type 16) */
	for (i = 0; i < (type/sizeof (int)); i++)
		ainfo->ai_termid.at_addr[i] =
		    STRUCT_FGET(info, ai_termid.at_addr[i]);

	/*
	 * Collapse a v4-mapped IPv6 address (::ffff:a.b.c.d) to plain
	 * IPv4; the v4 address is in the last 32-bit word of the mapped
	 * form.
	 */
	if (ainfo->ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(((in6_addr_t *)ainfo->ai_termid.at_addr))) {
		ainfo->ai_termid.at_type = AU_IPv4;
		ainfo->ai_termid.at_addr[0] = ainfo->ai_termid.at_addr[3];
		ainfo->ai_termid.at_addr[1] = 0;
		ainfo->ai_termid.at_addr[2] = 0;
		ainfo->ai_termid.at_addr[3] = 0;
	}

	ainfo->ai_asid = STRUCT_FGET(info, ai_asid);

	/* unlock and broadcast the cred changes */
	mutex_exit(&p->p_crlock);
	crset(p, newcred);

	return (0);
}
int adjtime(struct timeval *delta, struct timeval *olddelta) { struct timeval atv, oatv; int64_t ndelta; int64_t old_delta; int s; model_t datamodel = get_udatamodel(); if (secpolicy_settime(CRED()) != 0) return (set_errno(EPERM)); if (datamodel == DATAMODEL_NATIVE) { if (copyin(delta, &atv, sizeof (atv))) return (set_errno(EFAULT)); } else { struct timeval32 atv32; if (copyin(delta, &atv32, sizeof (atv32))) return (set_errno(EFAULT)); TIMEVAL32_TO_TIMEVAL(&atv, &atv32); } if (atv.tv_usec <= -MICROSEC || atv.tv_usec >= MICROSEC) return (set_errno(EINVAL)); /* * The SVID specifies that if delta is 0, then there is * no effect upon time correction, just return olddelta. */ ndelta = (int64_t)atv.tv_sec * NANOSEC + atv.tv_usec * 1000; mutex_enter(&tod_lock); s = hr_clock_lock(); old_delta = timedelta; if (ndelta) timedelta = ndelta; /* * Always set tod_needsync on all adjtime() calls, since it implies * someone is watching over us and keeping the local clock in sync. */ tod_needsync = 1; hr_clock_unlock(s); mutex_exit(&tod_lock); if (olddelta) { oatv.tv_sec = old_delta / NANOSEC; oatv.tv_usec = (old_delta % NANOSEC) / 1000; if (datamodel == DATAMODEL_NATIVE) { if (copyout(&oatv, olddelta, sizeof (oatv))) return (set_errno(EFAULT)); } else { struct timeval32 oatv32; if (TIMEVAL_OVERFLOW(&oatv)) return (set_errno(EOVERFLOW)); TIMEVAL_TO_TIMEVAL32(&oatv32, &oatv); if (copyout(&oatv32, olddelta, sizeof (oatv32))) return (set_errno(EFAULT)); } } return (0); }
/*
 * getpinfo_addr() - fetch the audit state (audit id, session id, terminal
 * id, and preselection mask) of an arbitrary process, selected by the
 * ap_pid field of the caller's auditpinfo_addr, and copy it back out.
 *
 * Returns 0 on success, EOVERFLOW if the caller's buffer is too small or
 * (ILP32 caller) the port device doesn't compress to 32 bits, EFAULT on
 * a bad user address, ESRCH if no such process, or EINVAL if the target's
 * cred carries no audit info.
 */
static int
getpinfo_addr(caddr_t data, int len)
{
	STRUCT_DECL(auditpinfo_addr, apinfo);
	proc_t *proc;
	const auditinfo_addr_t	*ainfo;
	model_t	model;
	cred_t	*cr, *newcred;

	model = get_udatamodel();
	STRUCT_INIT(apinfo, model);

	if (len < STRUCT_SIZE(apinfo))
		return (EOVERFLOW);

	if (copyin(data, STRUCT_BUF(apinfo), STRUCT_SIZE(apinfo)))
		return (EFAULT);

	/* Pre-allocate before taking pidlock; cralloc may sleep. */
	newcred = cralloc();

	mutex_enter(&pidlock);
	if ((proc = prfind(STRUCT_FGET(apinfo, ap_pid))) == NULL) {
		mutex_exit(&pidlock);
		crfree(newcred);
		return (ESRCH);
	}
	mutex_enter(&proc->p_lock);	/* so process doesn't go away */
	mutex_exit(&pidlock);

	audit_update_context(proc, newcred); /* make sure it's up-to-date */

	/* Hold the cred so it outlives dropping the process locks. */
	mutex_enter(&proc->p_crlock);
	crhold(cr = proc->p_cred);
	mutex_exit(&proc->p_crlock);
	mutex_exit(&proc->p_lock);

	ainfo = crgetauinfo(cr);
	if (ainfo == NULL) {
		crfree(cr);
		return (EINVAL);
	}

	STRUCT_FSET(apinfo, ap_auid, ainfo->ai_auid);
	STRUCT_FSET(apinfo, ap_asid, ainfo->ai_asid);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			crfree(cr);
			return (EOVERFLOW);
		}
		STRUCT_FSET(apinfo, ap_termid.at_port, dev);
	} else
		STRUCT_FSET(apinfo, ap_termid.at_port,
		    ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(apinfo, ap_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(apinfo, ap_termid.at_type, ainfo->ai_termid.at_type);
	STRUCT_FSET(apinfo, ap_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(apinfo, ap_mask, ainfo->ai_mask);

	crfree(cr);

	if (copyout(STRUCT_BUF(apinfo), data, STRUCT_SIZE(apinfo)))
		return (EFAULT);

	return (0);
}
/*
 * msgctl system call.
 *
 * Dispatches on cmd: IPC_SET/IPC_SET64 (change permissions and queue
 * size), IPC_STAT/IPC_STAT64 (report queue state), and IPC_RMID (remove
 * the queue).  Copyins are done before taking the queue lock; copyouts
 * after releasing it.  Raising msg_qbytes requires ipc-config privilege
 * (and, for IPC_SET64, passage of the process.max-msg-qbytes rctl).
 *
 * gets q lock (via ipc_lookup), releases before return.
 * may call users of msg_lock
 *
 * Returns 0, or -1 with errno set via set_errno().
 */
static int
msgctl(int msgid, int cmd, void *arg)
{
	STRUCT_DECL(msqid_ds, ds);		/* SVR4 queue work area */
	kmsqid_t		*qp;		/* ptr to associated q */
	int			error;
	struct	cred		*cr;
	model_t	mdl = get_udatamodel();
	struct msqid_ds64	ds64;
	kmutex_t		*lock;
	proc_t			*pp = curproc;

	STRUCT_INIT(ds, mdl);
	cr = CRED();

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
			return (set_errno(EFAULT));
		break;

	case IPC_SET64:
		if (copyin(arg, &ds64, sizeof (struct msqid_ds64)))
			return (set_errno(EFAULT));
		break;

	case IPC_RMID:
		if (error = ipc_rmid(msq_svc, msgid, cr))
			return (set_errno(error));
		return (0);
	}

	/*
	 * get msqid_ds for this msgid
	 */
	if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL)
		return (set_errno(EINVAL));

	switch (cmd) {
	case IPC_SET:
		/* growing the queue requires ipc-config privilege */
		if (STRUCT_FGET(ds, msg_qbytes) > qp->msg_qbytes &&
		    secpolicy_ipc_config(cr) != 0) {
			mutex_exit(lock);
			return (set_errno(EPERM));
		}
		if (error = ipcperm_set(msq_svc, cr, &qp->msg_perm,
		    &STRUCT_BUF(ds)->msg_perm, mdl)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		qp->msg_qbytes = STRUCT_FGET(ds, msg_qbytes);
		qp->msg_ctime = gethrestime_sec();
		break;

	case IPC_STAT:
		if (error = ipcperm_access(&qp->msg_perm, MSG_R, cr)) {
			mutex_exit(lock);
			return (set_errno(error));
		}

		/*
		 * Temporarily fold the reader/writer-waiting flags into
		 * ipc_mode for the snapshot, then clear them again.
		 */
		if (qp->msg_rcv_cnt)
			qp->msg_perm.ipc_mode |= MSG_RWAIT;
		if (qp->msg_snd_cnt)
			qp->msg_perm.ipc_mode |= MSG_WWAIT;
		ipcperm_stat(&STRUCT_BUF(ds)->msg_perm, &qp->msg_perm, mdl);
		qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
		STRUCT_FSETP(ds, msg_first, NULL);	/* kernel addr */
		STRUCT_FSETP(ds, msg_last, NULL);	/* kernel addr */
		STRUCT_FSET(ds, msg_cbytes, qp->msg_cbytes);
		STRUCT_FSET(ds, msg_qnum, qp->msg_qnum);
		STRUCT_FSET(ds, msg_qbytes, qp->msg_qbytes);
		STRUCT_FSET(ds, msg_lspid, qp->msg_lspid);
		STRUCT_FSET(ds, msg_lrpid, qp->msg_lrpid);
		STRUCT_FSET(ds, msg_stime, qp->msg_stime);
		STRUCT_FSET(ds, msg_rtime, qp->msg_rtime);
		STRUCT_FSET(ds, msg_ctime, qp->msg_ctime);
		break;

	case IPC_SET64:
		/*
		 * Growing the queue without ipc-config privilege is also
		 * subject to the process.max-msg-qbytes resource control;
		 * rctl_test requires p_lock.
		 */
		mutex_enter(&pp->p_lock);
		if ((ds64.msgx_qbytes > qp->msg_qbytes) &&
		    secpolicy_ipc_config(cr) != 0 &&
		    rctl_test(rc_process_msgmnb, pp->p_rctls, pp,
		    ds64.msgx_qbytes, RCA_SAFE) & RCT_DENY) {
			mutex_exit(&pp->p_lock);
			mutex_exit(lock);
			return (set_errno(EPERM));
		}
		mutex_exit(&pp->p_lock);
		if (error = ipcperm_set64(msq_svc, cr, &qp->msg_perm,
		    &ds64.msgx_perm)) {
			mutex_exit(lock);
			return (set_errno(error));
		}
		qp->msg_qbytes = ds64.msgx_qbytes;
		qp->msg_ctime = gethrestime_sec();
		break;

	case IPC_STAT64:
		/* same transient wait-flag folding as IPC_STAT */
		if (qp->msg_rcv_cnt)
			qp->msg_perm.ipc_mode |= MSG_RWAIT;
		if (qp->msg_snd_cnt)
			qp->msg_perm.ipc_mode |= MSG_WWAIT;
		ipcperm_stat64(&ds64.msgx_perm, &qp->msg_perm);
		qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
		ds64.msgx_cbytes = qp->msg_cbytes;
		ds64.msgx_qnum = qp->msg_qnum;
		ds64.msgx_qbytes = qp->msg_qbytes;
		ds64.msgx_lspid = qp->msg_lspid;
		ds64.msgx_lrpid = qp->msg_lrpid;
		ds64.msgx_stime = qp->msg_stime;
		ds64.msgx_rtime = qp->msg_rtime;
		ds64.msgx_ctime = qp->msg_ctime;
		break;

	default:
		mutex_exit(lock);
		return (set_errno(EINVAL));
	}

	mutex_exit(lock);

	/*
	 * Do copyout last (after releasing mutex).
	 */
	switch (cmd) {
	case IPC_STAT:
		if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
			return (set_errno(EFAULT));
		break;

	case IPC_STAT64:
		if (copyout(&ds64, arg, sizeof (struct msqid_ds64)))
			return (set_errno(EFAULT));
		break;
	}

	return (0);
}