/*
 * Return the per-zone ephemeral-ID state for 'zone', allocating and
 * initializing it on first use.  Classic double-checked locking: the
 * fast path reads the ZSD slot unlocked; on a miss we take
 * ephemeral_zone_mutex, re-check, and only then allocate, so at most
 * one ephemeral_zsd_t is ever installed per zone.
 */
static ephemeral_zsd_t *
get_ephemeral_zsd(zone_t *zone)
{
	ephemeral_zsd_t *eph_zsd;

	/* Fast path: state already exists for this zone. */
	eph_zsd = zone_getspecific(ephemeral_zone_key, zone);
	if (eph_zsd != NULL) {
		return (eph_zsd);
	}

	mutex_enter(&ephemeral_zone_mutex);
	/* Re-check under the lock in case another thread installed it. */
	eph_zsd = zone_getspecific(ephemeral_zone_key, zone);
	if (eph_zsd == NULL) {
		eph_zsd = kmem_zalloc(sizeof (ephemeral_zsd_t), KM_SLEEP);
		/* ID allocation cursors start at their extreme values. */
		eph_zsd->min_uid = MAXUID;
		eph_zsd->last_uid = IDMAP_WK__MAX_UID;
		eph_zsd->min_gid = MAXUID;
		eph_zsd->last_gid = IDMAP_WK__MAX_GID;
		mutex_init(&eph_zsd->eph_lock, NULL, MUTEX_DEFAULT, NULL);

		/*
		 * nobody is used to map SID containing CRs.
		 * Derived from the zone's kcred with uid/gid forced to
		 * nobody and credential flags cleared.
		 */
		eph_zsd->eph_nobody = crdup(zone->zone_kcred);
		(void) crsetugid(eph_zsd->eph_nobody, UID_NOBODY, GID_NOBODY);
		CR_FLAGS(eph_zsd->eph_nobody) = 0;
		eph_zsd->eph_nobody->cr_zone = zone;

		(void) zone_setspecific(ephemeral_zone_key, zone, eph_zsd);
	}
	mutex_exit(&ephemeral_zone_mutex);
	return (eph_zsd);
}
void exacct_commit_task(void *arg) { task_t *tk = (task_t *)arg; size_t size; zone_t *zone = tk->tk_zone; struct exacct_globals *acg; ASSERT(tk != task0p); ASSERT(tk->tk_memb_list == NULL); /* * Don't do any extra work if the acctctl module isn't loaded. */ if (exacct_zone_key != ZONE_KEY_UNINITIALIZED) { acg = zone_getspecific(exacct_zone_key, zone); (void) exacct_assemble_task_usage(&acg->ac_task, tk, exacct_commit_callback, NULL, 0, &size, EW_FINAL); if (tk->tk_zone != global_zone) { acg = zone_getspecific(exacct_zone_key, global_zone); (void) exacct_assemble_task_usage(&acg->ac_task, tk, exacct_commit_callback, NULL, 0, &size, EW_FINAL); } } /* * Release associated project and finalize task. */ task_end(tk); }
/* * Give current process the default core settings for its current zone; * used for processes entering a zone via zone_enter. */ void set_core_defaults(void) { proc_t *p = curproc; struct core_globals *cg; corectl_path_t *oldpath, *newpath; corectl_content_t *oldcontent, *newcontent; cg = zone_getspecific(core_zone_key, p->p_zone); /* make local copies of default values to protect against change */ newpath = cg->core_default_path; newcontent = cg->core_default_content; corectl_path_hold(newpath); corectl_content_hold(newcontent); mutex_enter(&p->p_lock); oldpath = p->p_corefile; p->p_corefile = newpath; oldcontent = p->p_content; p->p_content = newcontent; mutex_exit(&p->p_lock); if (oldpath != NULL) corectl_path_rele(oldpath); if (oldcontent != NULL) corectl_content_rele(oldcontent); }
/* * Called from start_init_common(), to set init's core file path and content. */ void init_core(void) { struct core_globals *cg; /* * The first time we hit this, in the global zone, we have to * initialize the zsd key. */ if (INGLOBALZONE(curproc)) { zone_key_create(&core_zone_key, core_init_zone, NULL, core_free_zone); } /* * zone_key_create will have called core_init_zone for the * global zone, which sets up the default path and content * variables. */ VERIFY((cg = zone_getspecific(core_zone_key, curproc->p_zone)) != NULL); corectl_path_hold(cg->core_default_path); corectl_content_hold(cg->core_default_content); curproc->p_corefile = cg->core_default_path; curproc->p_content = cg->core_default_content; }
/*
 * wracct(2) backend: write a partial or interval accounting record for
 * the given process or task id.  Returns 0 on success, otherwise sets
 * errno and returns -1 via set_errno().
 */
static int
wracct(idtype_t idtype, id_t id, int flags)
{
	struct exacct_globals *acg;
	size_t size = 0;
	int error;

	/* Only partial and interval records may be requested. */
	if (flags != EW_PARTIAL && flags != EW_INTERVAL)
		return (set_errno(EINVAL));

	acg = zone_getspecific(exacct_zone_key, curproc->p_zone);
	switch (idtype) {
	case P_PID:
		/* Interval records are not supported for processes. */
		if (flags == EW_INTERVAL)
			return (set_errno(ENOTSUP));
		error = wracct_proc(&acg->ac_proc, id, flags, &size);
		break;
	case P_TASKID:
		error = wracct_task(&acg->ac_task, id, flags, &size);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error == 0 ? 0 : set_errno(error));
}
/*
 * Create a master transport handle for the connectionless (CLTS) RPC
 * service, seeded with sizes from the transport's T_info_ack.
 * Returns 0 on success or EINVAL if no result pointer was supplied.
 */
/* ARGSUSED */
int
svc_clts_kcreate(file_t *fp, uint_t sendsz, struct T_info_ack *tinfo,
    SVCMASTERXPRT **nxprt)
{
	struct rpcstat *rpcstat;
	SVCMASTERXPRT *xprt;

	if (nxprt == NULL)
		return (EINVAL);

	rpcstat = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
	ASSERT(rpcstat != NULL);

	xprt = kmem_zalloc(sizeof (*xprt), KM_SLEEP);
	xprt->xp_p2 = (caddr_t)rpcstat->rpc_clts_server;
	xprt->xp_ops = &svc_clts_op;
	xprt->xp_msg_size = tinfo->TSDU_size;

	/* No remote-address buffer yet; record only its bound. */
	xprt->xp_rtaddr.buf = NULL;
	xprt->xp_rtaddr.len = 0;
	xprt->xp_rtaddr.maxlen = tinfo->ADDR_size;

	*nxprt = xprt;
	return (0);
}
/*
 * NLM_FREE_ALL, NLM4_FREE_ALL
 *
 * Destroy all lock state for the calling client.
 *
 * All hosts matching the caller's node name are collected onto a
 * private list under g->lock, then processed without it.  This is
 * safe because each collected host holds a new reference and has been
 * removed from the idle list, so nh_link can be reused for host_list.
 */
void
nlm_do_free_all(nlm4_notify *argp, void *res, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host_list host_list;
	struct nlm_host *hostp;

	TAILQ_INIT(&host_list);
	g = zone_getspecific(nlm_zone_key, curzone);

	/* Serialize calls to clean locks. */
	mutex_enter(&g->clean_lock);

	/*
	 * Find all hosts that have the given node name and put them on a
	 * local list.
	 */
	mutex_enter(&g->lock);
	for (hostp = avl_first(&g->nlm_hosts_tree); hostp != NULL;
	    hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp)) {
		if (strcasecmp(hostp->nh_name, argp->name) == 0) {
			/*
			 * If needed take the host out of the idle list since
			 * we are taking a reference.
			 */
			if (hostp->nh_flags & NLM_NH_INIDLE) {
				TAILQ_REMOVE(&g->nlm_idle_hosts, hostp,
				    nh_link);
				hostp->nh_flags &= ~NLM_NH_INIDLE;
			}
			hostp->nh_refs++;
			TAILQ_INSERT_TAIL(&host_list, hostp, nh_link);
		}
	}
	mutex_exit(&g->lock);

	/* Free locks for all hosts on the local list. */
	while (!TAILQ_EMPTY(&host_list)) {
		hostp = TAILQ_FIRST(&host_list);
		TAILQ_REMOVE(&host_list, hostp, nh_link);

		/*
		 * Note that this does not do client-side cleanup.
		 * We want to do that ONLY if statd tells us the
		 * server has restarted.
		 */
		nlm_host_notify_server(hostp, argp->state);
		nlm_host_release(g, hostp);
	}

	mutex_exit(&g->clean_lock);

	/* FREE_ALL sends no meaningful reply; res and sr are unused. */
	(void) res;
	(void) sr;
}
/*
 * void exacct_commit_proc(proc_t *, int)
 *
 * Overview
 *   Calculate and record the final extended-accounting usage for an
 *   exiting process.  A record is written for the process's zone and,
 *   if that is not the global zone, for the global zone as well.
 *   Intended to be called from proc_exit().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Suitable for KM_SLEEP allocations.  p_lock must not be held at
 *   entry.
 */
void
exacct_commit_proc(proc_t *p, int wstat)
{
	struct exacct_globals *acg;
	zone_t *zone = p->p_zone;

	/* acctctl module not loaded: nothing to commit. */
	if (exacct_zone_key == ZONE_KEY_UNINITIALIZED)
		return;

	acg = zone_getspecific(exacct_zone_key, zone);
	exacct_do_commit_proc(&acg->ac_proc, p, wstat);

	/* Non-global processes are also accounted in the global zone. */
	if (zone != global_zone) {
		acg = zone_getspecific(exacct_zone_key, global_zone);
		exacct_do_commit_proc(&acg->ac_proc, p, wstat);
	}
}
/*
 * putacct(2) backend: tag the accounting record of a process or task
 * with a caller-supplied, pre-validated buffer.
 *
 * For P_PID, pidlock is dropped as soon as a zone hold is taken; the
 * hold keeps zone_nodename valid for the duration of the (sleeping)
 * exacct_tag_proc() call.
 */
static int
putacct(idtype_t idtype, id_t id, void *buf, size_t bufsize, int flags)
{
	int error;
	taskid_t tkid;
	proc_t *p;
	task_t *tk;
	void *kbuf;
	struct exacct_globals *acg;

	if (bufsize == 0 || bufsize > EXACCT_MAX_BUFSIZE)
		return (set_errno(EINVAL));

	/* Copy the user's tag payload into a kernel buffer. */
	kbuf = kmem_alloc(bufsize, KM_SLEEP);
	if (copyin(buf, kbuf, bufsize) != 0) {
		error = EFAULT;
		goto out;
	}

	acg = zone_getspecific(exacct_zone_key, curproc->p_zone);
	switch (idtype) {
	case P_PID:
		mutex_enter(&pidlock);
		if ((p = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
		} else {
			zone_t *zone = p->p_zone;

			tkid = p->p_task->tk_tkid;
			/* Hold the zone before dropping pidlock. */
			zone_hold(zone);
			mutex_exit(&pidlock);

			error = exacct_tag_proc(&acg->ac_proc, id, tkid,
			    kbuf, bufsize, flags, zone->zone_nodename);
			zone_rele(zone);
		}
		break;
	case P_TASKID:
		if ((tk = task_hold_by_id(id)) != NULL) {
			error = exacct_tag_task(&acg->ac_task, tk, kbuf,
			    bufsize, flags);
			task_rele(tk);
		} else {
			error = ESRCH;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	kmem_free(kbuf, bufsize);
	return (error == 0 ? error : set_errno(error));
}
/*
 * NLM_UNSHARE, NLM4_UNSHARE
 *
 * Release a DOS-style share reservation
 */
void
nlm_do_unshare(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	vnode_t *vp = NULL;
	char *netid;
	int error;
	struct shrlock shr;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	/* Only look up an existing host; unshare never creates one. */
	host = nlm_host_find(g, netid, addr);
	if (host == NULL) {
		resp->stat = nlm4_denied_nolocks;
		return;
	}

	DTRACE_PROBE3(unshare__start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat = nlm4_denied_grace_period;
		goto out;
	}

	vp = nlm_fh_to_vp(&argp->share.fh);
	if (vp == NULL) {
		resp->stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	nlm_init_shrlock(&shr, &argp->share, host);
	error = VOP_SHRLOCK(vp, F_UNSHARE, &shr, FREAD | FWRITE,
	    CRED(), NULL);
	/*
	 * The UNSHARE reply has no failure status beyond the grace
	 * period, so the VOP result is deliberately ignored and the
	 * operation always reports granted.
	 */
	(void) error;
	resp->stat = nlm4_granted;

out:
	DTRACE_PROBE3(unshare__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareres *, resp);

	if (vp != NULL)
		VN_RELE(vp);
	nlm_host_release(g, host);
}
/*
 * Set the fields in the 'target' clone to the specified values.
 * Then, look at all clones to determine which message types are
 * currently active and which clone is the primary console queue.
 * If the primary console queue changes to or from the backlog
 * queue, copy all messages from backlog to primary or vice versa.
 */
void
log_update(log_t *target, queue_t *q, short flags, log_filter_t *filter)
{
	log_t *lp;
	short active = SL_CONSOLE;
	zone_t *zptr = NULL;
	log_zone_t *lzp;
	zoneid_t zoneid = target->log_zoneid;
	int i;

	log_enter();

	/* q == NULL means "leave log_q as-is for now" (see end of func). */
	if (q != NULL)
		target->log_q = q;
	target->log_wanted = filter;
	target->log_flags = flags;
	target->log_overflow = 0;

	/*
	 * Need to special case the global zone here since this may be
	 * called before zone_init.
	 */
	if (zoneid == GLOBAL_ZONEID) {
		lzp = &log_global;
	} else if ((zptr = zone_find_by_id(zoneid)) == NULL) {
		log_exit();
		return;		/* zone is being destroyed, ignore update */
	} else {
		lzp = zone_getspecific(log_zone_key, zptr);
	}
	ASSERT(lzp != NULL);

	/* Recompute the active-flags mask and the console queue. */
	for (i = LOG_LOGMAXIDX; i >= LOG_LOGMINIDX; i--) {
		lp = &lzp->lz_clones[i];
		if (zoneid == GLOBAL_ZONEID && (lp->log_flags & SL_CONSOLE))
			log_consq = lp->log_q;
		active |= lp->log_flags;
	}
	lzp->lz_active = active;

	if (zptr)
		zone_rele(zptr);

	/*
	 * If 'target' is (or was) the primary console queue, migrate
	 * messages between it and the backlog queue as appropriate.
	 */
	if (log_consq == target->log_q) {
		if (flags & SL_CONSOLE)
			log_conswitch(&log_backlog, target);
		else
			log_conswitch(target, &log_backlog);
	}
	/*
	 * NOTE(review): this final assignment also clears log_q when
	 * q == NULL (the close path), after the conswitch above has
	 * used the old value — presumably intentional; confirm against
	 * the callers before changing.
	 */
	target->log_q = q;
	log_exit();
}
/*
 * NLM_GRANTED, NLM_GRANTED_MSG,
 * NLM4_GRANTED, NLM4_GRANTED_MSG,
 *
 * This service routine is special. It's the only one that's
 * really part of our NLM _client_ support, used by _servers_
 * to "call back" when a blocking lock from this NLM client
 * is granted by the server. In this case, we _know_ there is
 * already an nlm_host allocated and held by the client code.
 * We want to find that nlm_host here.
 *
 * Over in nlm_call_lock(), the client encoded the sysid for this
 * server in the "owner handle" netbuf sent with our lock request.
 * We can now use that to find the nlm_host object we used there.
 * (NB: The owner handle is opaque to the server.)
 */
void
nlm_do_granted(nlm4_testargs *argp, nlm4_res *resp, struct svc_req *sr,
    nlm_res_cb cb)
{
	struct nlm_globals *g;
	struct nlm_owner_handle *oh;
	struct nlm_host *host;
	nlm_rpc_t *rpcp = NULL;
	int error;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);
	/* Default to denied; only a successful grant overrides this. */
	resp->stat.stat = nlm4_denied;

	g = zone_getspecific(nlm_zone_key, curzone);
	oh = (void *) argp->alock.oh.n_bytes;
	if (oh == NULL)
		return;

	/* The owner handle carries the sysid we assigned this server. */
	host = nlm_host_find_by_sysid(g, oh->oh_sysid);
	if (host == NULL)
		return;

	/* _MSG_ variants reply via a separate RPC callback. */
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0)
			goto out;
	}

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	/* Wake the local waiter sleeping on this blocked lock. */
	error = nlm_slock_grant(g, host, &argp->alock);
	if (error == 0)
		resp->stat.stat = nlm4_granted;

out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("do_granted", rpcp, resp, cb);
	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);
	nlm_host_release(g, host);
}
/*
 * Handle an SM_NOTIFY (protocol v1) from statd: a monitored peer has
 * changed state.  The peer's sysid was stashed in the private cookie
 * we registered with statd; use it to find the host and flush both
 * server- and client-side lock state for it.
 */
/* ARGSUSED */
void
nlm_do_notify1(nlm_sm_status *argp, void *res, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *hostp;
	uint16_t sysid;

	g = zone_getspecific(nlm_zone_key, curzone);

	/* Recover the sysid we hid in the statd private area. */
	bcopy(&argp->priv, &sysid, sizeof (sysid));

	DTRACE_PROBE2(nsm__notify, uint16_t, sysid,
	    int, argp->state);

	hostp = nlm_host_find_by_sysid(g, (sysid_t)sysid);
	if (hostp == NULL)
		return;

	nlm_host_notify_server(hostp, argp->state);
	nlm_host_notify_client(hostp, argp->state);
	nlm_host_release(g, hostp);
}
/*
 * Allocate a log device corresponding to supplied device type.
 * Both devices are clonable.  /dev/log devices are allocated per zone;
 * /dev/conslog devices come from the kmem cache.  Returns NULL when
 * all of the zone's /dev/log clones are in use.
 */
log_t *
log_alloc(minor_t type)
{
	zone_t *zptr = curproc->p_zone;
	log_zone_t *lzp;
	log_t *lp;
	minor_t minor;
	int i;

	if (type == LOG_CONSMIN) {
		/*
		 * Return a write-only /dev/conslog device.
		 * Grab the minor number first; no point allocating a
		 * log_t until one is available.
		 */
		minor = (minor_t)id_alloc(log_minorspace);
		lp = kmem_cache_alloc(log_cons_cache, KM_SLEEP);
		lp->log_minor = minor;
		return (lp);
	}

	ASSERT(type == LOG_LOGMIN);

	lzp = zone_getspecific(log_zone_key, zptr);
	ASSERT(lzp != NULL);

	/* Hand out the first free /dev/log clone for this zone. */
	for (i = LOG_LOGMINIDX; i <= LOG_LOGMAXIDX; i++) {
		lp = &lzp->lz_clones[i];
		if (lp->log_inuse == 0) {
			/* Indicate which device type */
			lp->log_major = LOG_LOGMIN;
			return (lp);
		}
	}

	return (NULL);		/* every clone is busy */
}
void exacct_commit_flow(void *arg) { flow_usage_t *f = (flow_usage_t *)arg; size_t size; ulong_t mask[AC_MASK_SZ]; struct exacct_globals *acg; ac_info_t *ac_flow; if (exacct_zone_key == ZONE_KEY_UNINITIALIZED) { /* * acctctl module not loaded. Nothing to do. */ return; } /* * Even though each zone nominally has its own flow accounting settings * (ac_flow), these are only maintained by and for the global zone. * * If this were to change in the future, this function should grow a * second zoneid (or zone) argument, and use the corresponding zone's * settings rather than always using those of the global zone. */ acg = zone_getspecific(exacct_zone_key, global_zone); ac_flow = &acg->ac_flow; mutex_enter(&ac_flow->ac_lock); if (ac_flow->ac_state == AC_OFF) { mutex_exit(&ac_flow->ac_lock); return; } bt_copy(&ac_flow->ac_mask[0], mask, AC_MASK_SZ); mutex_exit(&ac_flow->ac_lock); (void) exacct_assemble_flow_usage(ac_flow, f, exacct_commit_callback, NULL, 0, &size); }
/*
 * getacct(2) backend: copy the current accounting record for a process
 * or task into the caller's buffer.  Returns the record size on
 * success, otherwise sets errno and returns -1 via set_errno().
 */
static ssize_t
getacct(idtype_t idtype, id_t id, void *buf, size_t bufsize)
{
	struct exacct_globals *acg;
	size_t size = 0;
	int error;

	/* Silently clamp oversized requests to the supported maximum. */
	if (bufsize > EXACCT_MAX_BUFSIZE)
		bufsize = EXACCT_MAX_BUFSIZE;

	acg = zone_getspecific(exacct_zone_key, curproc->p_zone);

	if (idtype == P_PID) {
		error = getacct_proc(&acg->ac_proc, id, buf, bufsize, &size);
	} else if (idtype == P_TASKID) {
		error = getacct_task(&acg->ac_task, id, buf, bufsize, &size);
	} else {
		error = EINVAL;
	}

	return (error == 0 ? (ssize_t)size : set_errno(error));
}
void log_sendmsg(mblk_t *mp, zoneid_t zoneid) { log_t *lp; char *src, *dst; mblk_t *mp2 = mp->b_cont; log_ctl_t *lc = (log_ctl_t *)mp->b_rptr; int flags, fac; off_t facility = 0; off_t body = 0; zone_t *zptr = NULL; log_zone_t *lzp; int i; int backlog; /* * Need to special case the global zone here since this may be * called before zone_init. */ if (zoneid == GLOBAL_ZONEID) { lzp = &log_global; } else if ((zptr = zone_find_by_id(zoneid)) == NULL) { /* specified zone doesn't exist, free message and return */ log_freemsg(mp); return; } else { lzp = zone_getspecific(log_zone_key, zptr); } ASSERT(lzp != NULL); if ((lc->flags & lzp->lz_active) == 0) { if (zptr) zone_rele(zptr); log_freemsg(mp); return; } if (panicstr) { /* * Raise the console queue's q_hiwat to ensure that we * capture all panic messages. */ log_consq->q_hiwat = 2 * LOG_HIWAT; log_consq->q_flag &= ~QFULL; /* Message was created while panicking. */ lc->flags |= SL_PANICMSG; } src = (char *)mp2->b_rptr; dst = strstr(src, "FACILITY_AND_PRIORITY] "); if (dst != NULL) { facility = dst - src; body = facility + 23; /* strlen("FACILITY_AND_PRIORITY] ") */ } log_enter(); /* * In the early boot phase hrestime is invalid, then timechanged is 0. * If hrestime is not valid, the ttime is set to 0 here and the correct * ttime is calculated in log_conswitch() later. The log_conswitch() * calculation to determine the correct ttime does not use ttime data * from these log_ctl_t structures; it only uses ttime from log_ctl_t's * that contain good data. * */ lc->ltime = ddi_get_lbolt(); if (timechanged) { lc->ttime = gethrestime_sec(); } else { lc->ttime = 0; } flags = lc->flags & lzp->lz_active; log_seq_no[flags & SL_ERROR]++; log_seq_no[flags & SL_TRACE]++; log_seq_no[flags & SL_CONSOLE]++; /* * If this is in the global zone, start with the backlog, then * walk through the clone logs. If not, just do the clone logs. 
*/ backlog = (zoneid == GLOBAL_ZONEID); i = LOG_LOGMINIDX; while (i <= LOG_LOGMAXIDX) { if (backlog) { /* * Do the backlog this time, then start on the * others. */ backlog = 0; lp = &log_backlog; } else { lp = &lzp->lz_clones[i++]; } if ((lp->log_flags & flags) && lp->log_wanted(lp, lc)) { if (canput(lp->log_q)) { lp->log_overflow = 0; lc->seq_no = log_seq_no[lp->log_flags]; if ((mp2 = copymsg(mp)) == NULL) break; if (facility != 0) { src = (char *)mp2->b_cont->b_rptr; dst = src + facility; fac = (lc->pri & LOG_FACMASK) >> 3; dst += snprintf(dst, LOG_FACSIZE + LOG_PRISIZE, "%s.%s", log_fac[MIN(fac, LOG_NFACILITIES)], log_pri[lc->pri & LOG_PRIMASK]); src += body - 2; /* copy "] " too */ while (*src != '\0') *dst++ = *src++; *dst++ = '\0'; mp2->b_cont->b_wptr = (uchar_t *)dst; } (void) putq(lp->log_q, mp2); } else if (++lp->log_overflow == 1) {
/*
 * Perform process accounting functions.
 *
 * sysacct(fname):
 *   fname == NULL  -> stop accounting for this zone and close the file.
 *   fname != NULL  -> start accounting to fname, or switch from the
 *                     current file to fname.
 * Returns 0 on success; on failure sets errno and returns -1 via
 * set_errno().  Requires PRIV_SYS_ACCT (secpolicy_acct).
 */
int
sysacct(char *fname)
{
	struct acct_globals *ag;
	struct vnode *vp;
	int error = 0;

	if (secpolicy_acct(CRED()) != 0)
		return (set_errno(EPERM));

	ag = zone_getspecific(acct_zone_key, curproc->p_zone);
	ASSERT(ag != NULL);

	if (fname == NULL) {
		/*
		 * Close the file and stop accounting.
		 */
		mutex_enter(&ag->aclock);
		vp = ag->acctvp;
		ag->acctvp = NULL;
		mutex_exit(&ag->aclock);
		if (vp) {
			error = VOP_CLOSE(vp, FWRITE, 1, (offset_t)0,
			    CRED(), NULL);
			VN_RELE(vp);
		}
		return (error == 0 ? 0 : set_errno(error));
	}

	/*
	 * Either (a) open a new file and begin accounting -or- (b)
	 * switch accounting from an old to a new file.
	 *
	 * (Open the file without holding aclock in case it
	 * sleeps (holding the lock prevents process exit).)
	 */
	if ((error = vn_open(fname, UIO_USERSPACE, FWRITE, 0, &vp,
	    (enum create)0, 0)) != 0) {
		/* SVID compliance */
		if (error == EISDIR)
			error = EACCES;
		return (set_errno(error));
	}

	if (vp->v_type != VREG) {
		error = EACCES;
	} else {
		/*
		 * acct_list_lock guards the cross-zone check that no
		 * other zone is already accounting to this vnode;
		 * aclock is nested inside it for the swap below.
		 */
		mutex_enter(&acct_list_lock);
		if (acct_find(vp, B_FALSE)) {
			error = EBUSY;
		} else {
			mutex_enter(&ag->aclock);
			if (ag->acctvp) {
				vnode_t *oldvp;

				/*
				 * close old acctvp, and point acct()
				 * at new file by swapping vp and acctvp
				 */
				oldvp = ag->acctvp;
				ag->acctvp = vp;
				vp = oldvp;
			} else {
				/*
				 * no existing file, start accounting ..
				 */
				ag->acctvp = vp;
				vp = NULL;
			}
			mutex_exit(&ag->aclock);
		}
		mutex_exit(&acct_list_lock);
	}

	/*
	 * At this point vp is either the displaced old file or a new
	 * file we failed to install; close it in either case.
	 */
	if (vp) {
		(void) VOP_CLOSE(vp, FWRITE, 1, (offset_t)0, CRED(), NULL);
		VN_RELE(vp);
	}
	return (error == 0 ? 0 : set_errno(error));
}
/*
 * NLM_UNLOCK, NLM_UNLOCK_MSG,
 * NLM4_UNLOCK, NLM4_UNLOCK_MSG,
 * Client removes one of their locks.
 */
void
nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *resp, struct svc_req *sr,
    nlm_res_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	nlm_rpc_t *rpcp = NULL;
	vnode_t *vp = NULL;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);
	name = argp->alock.caller_name;

	/*
	 * NLM_UNLOCK operation doesn't have an error code
	 * denoting that operation failed, so we always
	 * return nlm4_granted except when the server is
	 * in a grace period.
	 */
	resp->stat.stat = nlm4_granted;

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL)
		return;

	/* _MSG_ variants reply via a separate RPC callback. */
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0)
			goto out;
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_unlockargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	vp = nlm_fh_to_vp(&argp->alock.fh);
	if (vp == NULL)
		goto out;

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    F_UNLCK);
	if (error)
		goto out;

	/* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_UNLCK, &fl, F_REMOTE); */
	error = nlm_vop_frlock(vp, F_SETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);

	/* Any error here is observable only via the probe (see above). */
	DTRACE_PROBE1(unlock__res, int, error);
out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("unlock", rpcp, resp, cb);

	DTRACE_PROBE3(unlock__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (vp != NULL)
		VN_RELE(vp);
	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_host_release(g, host);
}
/*
 * NLM_CANCEL, NLM_CANCEL_MSG,
 * NLM4_CANCEL, NLM4_CANCEL_MSG,
 * Client gives up waiting for a blocking lock.
 */
void
nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *resp, struct svc_req *sr,
    nlm_res_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	struct nlm_vhold *nvp = NULL;
	nlm_rpc_t *rpcp = NULL;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);
	name = argp->alock.caller_name;

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		resp->stat.stat = nlm4_denied_nolocks;
		return;
	}

	/* _MSG_ variants reply via a separate RPC callback. */
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			resp->stat.stat = nlm4_denied_nolocks;
			goto out;
		}
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_cancargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
	if (nvp == NULL) {
		resp->stat.stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	/* Forget the pending sleeping-lock request, if any. */
	error = nlm_slreq_unregister(host, nvp, &fl);
	if (error != 0) {
		/*
		 * There's no sleeping lock request corresponding
		 * to the lock. Then requested sleeping lock
		 * doesn't exist.
		 */
		resp->stat.stat = nlm4_denied;
		goto out;
	}

	/* Also drop the lock itself in case it was already granted. */
	fl.l_type = F_UNLCK;
	error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);

	resp->stat.stat = (error == 0) ?
	    nlm4_granted : nlm4_denied;
out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("cancel", rpcp, resp, cb);

	DTRACE_PROBE3(cancel__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_vhold_release(host, nvp);
	nlm_host_release(g, host);
}
/*
 * NLM_LOCK, NLM_LOCK_MSG, NLM_NM_LOCK
 * NLM4_LOCK, NLM4_LOCK_MSG, NLM4_NM_LOCK
 *
 * Client request to set a lock, possibly blocking.
 *
 * If the lock needs to block, we return status blocked to
 * this RPC call, and then later call back the client with
 * a "granted" callback.  Tricky aspects of this include:
 * sending a reply before this function returns, and then
 * borrowing this thread from the RPC service pool for the
 * wait on the lock and doing the later granted callback.
 *
 * We also have to keep a list of locks (pending + granted)
 * both to handle retransmitted requests, and to keep the
 * vnodes for those locks active.
 */
void
nlm_do_lock(nlm4_lockargs *argp, nlm4_res *resp, struct svc_req *sr,
    nlm_reply_cb reply_cb, nlm_res_cb res_cb, nlm_testargs_cb grant_cb)
{
	struct nlm_globals *g;
	struct flock64 fl;
	struct nlm_host *host = NULL;
	struct netbuf *addr;
	struct nlm_vhold *nvp = NULL;
	nlm_rpc_t *rpcp = NULL;
	char *netid;
	char *name;
	int error, flags;
	bool_t do_blocking = FALSE;
	bool_t do_mon_req = FALSE;
	enum nlm4_stats status;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	name = argp->alock.caller_name;
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		DTRACE_PROBE4(no__host, struct nlm_globals *, g,
		    char *, name, char *, netid, struct netbuf *, addr);
		status = nlm4_denied_nolocks;
		goto doreply;
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_lockargs *, argp);

	/*
	 * If we may need to do _msg_ call needing an RPC
	 * callback, get the RPC client handle now,
	 * so we know if we can bind to the NLM service on
	 * this client.
	 *
	 * Note: host object carries transport type.
	 * One client using multiple transports gets
	 * separate sysids for each of its transports.
	 */
	if (res_cb != NULL ||
	    (grant_cb != NULL && argp->block == TRUE)) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			status = nlm4_denied_nolocks;
			goto doreply;
		}
	}

	/*
	 * During the "grace period", only allow reclaim.
	 */
	if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
		status = nlm4_denied_grace_period;
		goto doreply;
	}

	/*
	 * Check whether we missed host shutdown event
	 */
	if (nlm_host_get_state(host) != argp->state)
		nlm_host_notify_server(host, argp->state);

	/*
	 * Get a hold on the vnode for a lock operation.
	 * Only lock() and share() need vhold objects.
	 */
	nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
	if (nvp == NULL) {
		status = nlm4_stale_fh;
		goto doreply;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		status = nlm4_failed;
		goto doreply;
	}

	/*
	 * Try to lock non-blocking first.  If we succeed
	 * getting the lock, we can reply with the granted
	 * status directly and avoid the complications of
	 * making the "granted" RPC callback later.
	 *
	 * This also let's us find out now about some
	 * possible errors like EROFS, etc.
	 */
	flags = F_REMOTELOCK | FREAD | FWRITE;
	error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl, flags,
	    (u_offset_t)0, NULL, CRED(), NULL);

	DTRACE_PROBE3(setlk__res, struct flock64 *, &fl,
	    int, flags, int, error);

	/* Map the frlock result onto an NLM status code. */
	switch (error) {
	case 0:
		/* Got it without waiting! */
		status = nlm4_granted;
		do_mon_req = TRUE;
		break;

	/* EINPROGRESS too? */
	case EAGAIN:
		/* We did not get the lock. Should we block? */
		if (argp->block == FALSE || grant_cb == NULL) {
			status = nlm4_denied;
			break;
		}
		/*
		 * Should block.  Try to reserve this thread
		 * so we can use it to wait for the lock and
		 * later send the granted message.  If this
		 * reservation fails, say "no resources".
		 */
		if (!svc_reserve_thread(sr->rq_xprt)) {
			status = nlm4_denied_nolocks;
			break;
		}
		/*
		 * OK, can detach this thread, so this call
		 * will block below (after we reply).
		 */
		status = nlm4_blocked;
		do_blocking = TRUE;
		do_mon_req = TRUE;
		break;

	case ENOLCK:
		/* Failed for lack of resources. */
		status = nlm4_denied_nolocks;
		break;

	case EROFS:
		/* read-only file system */
		status = nlm4_rofs;
		break;

	case EFBIG:
		/* file too big */
		status = nlm4_fbig;
		break;

	case EDEADLK:
		/* dead lock condition */
		status = nlm4_deadlck;
		break;

	default:
		status = nlm4_denied;
		break;
	}

doreply:
	resp->stat.stat = status;

	/*
	 * We get one of two function pointers; one for a
	 * normal RPC reply, and another for doing an RPC
	 * "callback" _res reply for a _msg function.
	 * Use either of those to send the reply now.
	 *
	 * If sending this reply fails, just leave the
	 * lock in the list for retransmitted requests.
	 * Cleanup is via unlock or host rele (statmon).
	 */
	if (reply_cb != NULL) {
		/* i.e. nlm_lock_1_reply */
		if (!(*reply_cb)(sr->rq_xprt, resp))
			svcerr_systemerr(sr->rq_xprt);
	}
	if (res_cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("lock", rpcp, resp, res_cb);

	/*
	 * The reply has been sent to the client.
	 * Start monitoring this client (maybe).
	 *
	 * Note that the non-monitored (NM) calls pass grant_cb=NULL
	 * indicating that the client doesn't support RPC callbacks.
	 * No monitoring for these (lame) clients.
	 */
	if (do_mon_req && grant_cb != NULL)
		nlm_host_monitor(g, host, argp->state);

	if (do_blocking) {
		/*
		 * We need to block on this lock, and when that
		 * completes, do the granted RPC call.  Note that
		 * we "reserved" this thread above, so we can now
		 * "detach" it from the RPC SVC pool, allowing it
		 * to block indefinitely if needed.
		 */
		ASSERT(rpcp != NULL);
		(void) svc_detach_thread(sr->rq_xprt);
		nlm_block(argp, host, nvp, rpcp, &fl, grant_cb);
	}

	DTRACE_PROBE3(lock__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_vhold_release(host, nvp);
	nlm_host_release(g, host);
}
/*
 * NLM_TEST, NLM_TEST_MSG,
 * NLM4_TEST, NLM4_TEST_MSG,
 * Client inquiry about locks, non-blocking.
 */
void
nlm_do_test(nlm4_testargs *argp, nlm4_testres *resp,
    struct svc_req *sr, nlm_testres_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct nlm4_holder *lh;
	struct nlm_owner_handle *oh;
	nlm_rpc_t *rpcp = NULL;
	vnode_t *vp = NULL;
	struct netbuf *addr;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	name = argp->alock.caller_name;
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		resp->stat.stat = nlm4_denied_nolocks;
		return;
	}

	/* _MSG_ variants reply via a separate RPC callback. */
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			resp->stat.stat = nlm4_denied_nolocks;
			goto out;
		}
	}

	vp = nlm_fh_to_vp(&argp->alock.fh);
	if (vp == NULL) {
		resp->stat.stat = nlm4_stale_fh;
		goto out;
	}

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	/* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_GETLK, &fl, F_REMOTE); */
	error = nlm_vop_frlock(vp, F_GETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	/* F_UNLCK from F_GETLK means no conflicting lock exists. */
	if (fl.l_type == F_UNLCK) {
		resp->stat.stat = nlm4_granted;
		goto out;
	}
	resp->stat.stat = nlm4_denied;

	/*
	 * This lock "test" fails due to a conflicting lock.
	 *
	 * If this is a v1 client, make sure the conflicting
	 * lock range we report can be expressed with 32-bit
	 * offsets.  The lock range requested was expressed
	 * as 32-bit offset and length, so at least part of
	 * the conflicting lock should lie below MAX_UOFF32.
	 * If the conflicting lock extends past that, we'll
	 * trim the range to end at MAX_UOFF32 so this lock
	 * can be represented in a 32-bit response.  Check
	 * the start also (paranoid, but a low cost check).
	 */
	if (sr->rq_vers < NLM4_VERS) {
		uint64 maxlen;
		if (fl.l_start > MAX_UOFF32)
			fl.l_start = MAX_UOFF32;
		maxlen = MAX_UOFF32 + 1 - fl.l_start;
		if (fl.l_len > maxlen)
			fl.l_len = maxlen;
	}

	/*
	 * Build the nlm4_holder result structure.
	 *
	 * Note that lh->oh is freed via xdr_free,
	 * xdr_nlm4_holder, xdr_netobj, xdr_bytes.
	 */
	oh = kmem_zalloc(sizeof (*oh), KM_SLEEP);
	oh->oh_sysid = (sysid_t)fl.l_sysid;
	lh = &resp->stat.nlm4_testrply_u.holder;
	lh->exclusive = (fl.l_type == F_WRLCK);
	lh->svid = fl.l_pid;
	lh->oh.n_len = sizeof (*oh);
	lh->oh.n_bytes = (void *)oh;
	lh->l_offset = fl.l_start;
	lh->l_len = fl.l_len;
out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("test", rpcp, resp, cb);

	if (vp != NULL)
		VN_RELE(vp);
	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_host_release(g, host);
}
/*
 * corectl(2) system call: administer per-zone and per-process core file
 * settings (global/default/process core paths, content bitmasks, and
 * options).  The meaning of arg1..arg3 depends on 'subcode'.
 *
 * Returns 0 on success (or the options word for CC_GET_OPTIONS);
 * on failure sets errno via set_errno() and returns -1.
 */
int
corectl(int subcode, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	int error = 0;
	proc_t *p;
	refstr_t *rp;
	size_t size;
	char *path;
	core_content_t content = CC_CONTENT_INVALID;
	struct core_globals *cg;
	zone_t *zone = curproc->p_zone;

	cg = zone_getspecific(core_zone_key, zone);
	ASSERT(cg != NULL);

	switch (subcode) {
	case CC_SET_OPTIONS:
		/* Privileged; reject any bits outside CC_OPTIONS. */
		if ((error = secpolicy_coreadm(CRED())) == 0) {
			if (arg1 & ~CC_OPTIONS)
				error = EINVAL;
			else
				cg->core_options = (uint32_t)arg1;
		}
		break;

	case CC_GET_OPTIONS:
		return (cg->core_options);

	case CC_GET_GLOBAL_PATH:
	case CC_GET_DEFAULT_PATH:
	case CC_GET_PROCESS_PATH:
		if (subcode == CC_GET_GLOBAL_PATH) {
			/* Take a hold under core_lock so rp stays valid. */
			mutex_enter(&cg->core_lock);
			if ((rp = cg->core_file) != NULL)
				refstr_hold(rp);
			mutex_exit(&cg->core_lock);
		} else if (subcode == CC_GET_DEFAULT_PATH) {
			rp = corectl_path_value(cg->core_default_path);
		} else {
			rp = NULL;
			mutex_enter(&pidlock);
			if ((p = prfind((pid_t)arg3)) == NULL ||
			    p->p_stat == SIDL) {
				mutex_exit(&pidlock);
				error = ESRCH;
			} else {
				/* Lock order: pidlock -> p_lock -> p_crlock */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
				mutex_enter(&p->p_crlock);
				if (!hasprocperm(p->p_cred, CRED()))
					error = EPERM;
				else if (p->p_corefile != NULL)
					rp = corectl_path_value(p->p_corefile);
				mutex_exit(&p->p_crlock);
				mutex_exit(&p->p_lock);
			}
		}
		if (rp == NULL) {
			/* No path configured: return an empty string. */
			if (error == 0 && suword8((void *)arg1, 0))
				error = EFAULT;
		} else {
			error = copyoutstr(refstr_value(rp), (char *)arg1,
			    (size_t)arg2, NULL);
			refstr_rele(rp);
		}
		break;

	case CC_SET_GLOBAL_PATH:
	case CC_SET_DEFAULT_PATH:
		if ((error = secpolicy_coreadm(CRED())) != 0)
			break;

		/* FALLTHROUGH */
	case CC_SET_PROCESS_PATH:
		if ((size = MIN((size_t)arg2, MAXPATHLEN)) == 0) {
			error = EINVAL;
			break;
		}
		path = kmem_alloc(size, KM_SLEEP);
		error = copyinstr((char *)arg1, path, size, NULL);
		if (error == 0) {
			if (subcode == CC_SET_PROCESS_PATH) {
				error = set_proc_info((pid_t)arg3, path, 0);
			} else if (subcode == CC_SET_DEFAULT_PATH) {
				corectl_path_set(cg->core_default_path, path);
			} else if (*path != '\0' && *path != '/') {
				/* Global path must be absolute (or empty). */
				error = EINVAL;
			} else {
				refstr_t *nrp = refstr_alloc(path);

				mutex_enter(&cg->core_lock);
				rp = cg->core_file;
				if (*path == '\0')
					cg->core_file = NULL;
				else
					refstr_hold(cg->core_file = nrp);
				mutex_exit(&cg->core_lock);

				/* Drop the old path (if any) and our alloc ref. */
				if (rp != NULL)
					refstr_rele(rp);

				refstr_rele(nrp);
			}
		}
		kmem_free(path, size);
		break;

	case CC_SET_GLOBAL_CONTENT:
	case CC_SET_DEFAULT_CONTENT:
		if ((error = secpolicy_coreadm(CRED())) != 0)
			break;

		/* FALLTHROUGH */
	case CC_SET_PROCESS_CONTENT:
		error = copyin((void *)arg1, &content, sizeof (content));
		if (error != 0)
			break;

		/*
		 * If any unknown bits are set, don't let this charade
		 * continue.
		 */
		if (content & ~CC_CONTENT_ALL) {
			error = EINVAL;
			break;
		}

		if (subcode == CC_SET_PROCESS_CONTENT) {
			error = set_proc_info((pid_t)arg2, NULL, content);
		} else if (subcode == CC_SET_DEFAULT_CONTENT) {
			corectl_content_set(cg->core_default_content, content);
		} else {
			mutex_enter(&cg->core_lock);
			cg->core_content = content;
			mutex_exit(&cg->core_lock);
		}

		break;

	case CC_GET_GLOBAL_CONTENT:
		content = cg->core_content;
		error = copyout(&content, (void *)arg1, sizeof (content));
		break;

	case CC_GET_DEFAULT_CONTENT:
		content = corectl_content_value(cg->core_default_content);
		error = copyout(&content, (void *)arg1, sizeof (content));
		break;

	case CC_GET_PROCESS_CONTENT:
		mutex_enter(&pidlock);
		if ((p = prfind((pid_t)arg2)) == NULL || p->p_stat == SIDL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}

		/* Lock order: pidlock -> p_lock -> p_crlock */
		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);
		mutex_enter(&p->p_crlock);
		if (!hasprocperm(p->p_cred, CRED()))
			error = EPERM;
		else if (p->p_content == NULL)
			content = CC_CONTENT_NONE;
		else
			content = corectl_content_value(p->p_content);
		mutex_exit(&p->p_crlock);
		mutex_exit(&p->p_lock);

		if (error == 0)
			error = copyout(&content, (void *)arg1,
			    sizeof (content));
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (set_errno(error));
	return (0);
}
/*
 * NLM_SHARE, NLM4_SHARE
 *
 * Request a DOS-style share reservation
 */
void
nlm_do_share(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *hostp;
	struct nlm_vhold *nhold = NULL;
	struct netbuf *caller_addr;
	char *caller_name;
	char *caller_netid;
	struct shrlock shlk;
	int rc;

	/* Reflect the client's cookie back in the reply. */
	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	caller_name = argp->share.caller_name;
	caller_netid = svc_getnetid(sr->rq_xprt);
	caller_addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	hostp = nlm_host_findcreate(g, caller_name, caller_netid, caller_addr);
	if (hostp == NULL) {
		/* No host entry could be found or created. */
		resp->stat = nlm4_denied_nolocks;
		return;
	}

	DTRACE_PROBE3(share__start, struct nlm_globals *, g,
	    struct nlm_host *, hostp, nlm4_shareargs *, argp);

	/* Only reclaim requests are honored during the grace period. */
	if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
		resp->stat = nlm4_denied_grace_period;
		goto out;
	}

	/*
	 * Get holded vnode when on lock operation.
	 * Only lock() and share() need vhold objects.
	 */
	nhold = nlm_fh_to_vhold(hostp, &argp->share.fh);
	if (nhold == NULL) {
		resp->stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	nlm_init_shrlock(&shlk, &argp->share, hostp);
	rc = VOP_SHRLOCK(nhold->nv_vp, F_SHARE, &shlk,
	    FREAD | FWRITE, CRED(), NULL);

	if (rc != 0) {
		resp->stat = nlm4_denied;
	} else {
		/* Reservation taken; start monitoring this client. */
		resp->stat = nlm4_granted;
		nlm_host_monitor(g, hostp, 0);
	}

out:
	DTRACE_PROBE3(share__end, struct nlm_globals *, g,
	    struct nlm_host *, hostp, nlm4_shareres *, resp);

	nlm_vhold_release(hostp, nhold);
	nlm_host_release(g, hostp);
}