/*
 * Set up the connection's cached header addresses from the faddr list.
 *
 * XXX implement more sophisticated logic
 *
 * Tsol note: We have already verified the addresses using tsol_check_dest
 * in sctp_add_faddr, thus no need to redo that here.
 * We do setup ipp_label_v4 and ipp_label_v6 based on which addresses
 * we have.
 *
 * Returns 0 on success, EACCES if no usable (labelable) address of either
 * family was found.
 */
int
sctp_set_hdraddrs(sctp_t *sctp)
{
	sctp_faddr_t *fp;
	int gotv4 = 0;		/* found a usable IPv4(-mapped) faddr */
	int gotv6 = 0;		/* found a usable native IPv6 faddr */
	conn_t *connp = sctp->sctp_connp;

	ASSERT(sctp->sctp_faddrs != NULL);
	ASSERT(sctp->sctp_nsaddrs > 0);

	/* Set up using the primary first */
	connp->conn_faddr_v6 = sctp->sctp_primary->faddr;
	/* saddr may be unspec; make_mp() will handle this */
	connp->conn_saddr_v6 = sctp->sctp_primary->saddr;
	connp->conn_laddr_v6 = connp->conn_saddr_v6;

	/*
	 * On a labeled system, an address only counts if the label
	 * setup (sctp_v4_label/sctp_v6_label) succeeds for it.
	 */
	if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->faddr)) {
		if (!is_system_labeled() ||
		    sctp_v4_label(sctp, sctp->sctp_primary) == 0) {
			gotv4 = 1;
			/* AF_INET conn never needs a v6 label; done */
			if (connp->conn_family == AF_INET) {
				goto done;
			}
		}
	} else {
		if (!is_system_labeled() ||
		    sctp_v6_label(sctp, sctp->sctp_primary) == 0) {
			gotv6 = 1;
		}
	}

	/*
	 * Walk the remaining peer addresses until we have label state
	 * for each address family we may need.
	 */
	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
		if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
			if (!is_system_labeled() ||
			    sctp_v4_label(sctp, fp) == 0) {
				gotv4 = 1;
				if (connp->conn_family == AF_INET || gotv6) {
					break;
				}
			}
		} else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
			if (!is_system_labeled() ||
			    sctp_v6_label(sctp, fp) == 0) {
				gotv6 = 1;
				if (gotv4)
					break;
			}
		}
	}

done:
	/* No address passed label checks for either family. */
	if (!gotv4 && !gotv6)
		return (EACCES);

	return (0);
}
static int nbns_rq_opensocket(struct nbns_rq *rqp) { struct sockaddr_in locaddr; int opt = 1, s; struct nb_ctx *ctx = rqp->nr_nbd; s = rqp->nr_fd = socket(AF_INET, SOCK_DGRAM, 0); if (s < 0) return (errno); if (ctx->nb_flags & NBCF_BC_ENABLE) { if (setsockopt(s, SOL_SOCKET, SO_BROADCAST, &opt, sizeof (opt)) < 0) return (errno); } if (is_system_labeled()) (void) setsockopt(s, SOL_SOCKET, SO_MAC_EXEMPT, &opt, sizeof (opt)); bzero(&locaddr, sizeof (locaddr)); locaddr.sin_family = AF_INET; /* locaddr.sin_len = sizeof (locaddr); */ if (bind(s, (struct sockaddr *)&locaddr, sizeof (locaddr)) < 0) return (errno); return (0); }
/*
 * interface for passing a peer's connection to gather sensitivity labeling
 * from for Trusted Solaris.
 */
papi_status_t
papiServiceSetPeer(papi_service_t handle, int peerfd)
{
	service_t *svc = handle;
	short status;

	if (svc == NULL)
		return (PAPI_BAD_ARGUMENT);

	/* Peer labeling only matters on a labeled (TX) system. */
	if (!is_system_labeled())
		return (PAPI_OK);

	/*
	 * Hand the peer's fd to lpsched; any transport failure is
	 * collapsed into MTRANSMITERR.
	 */
	if ((snd_msg(svc, S_PASS_PEER_CONNECTION) < 0) ||
	    (ioctl(svc->md->writefd, I_SENDFD, peerfd) < 0) ||
	    (rcv_msg(svc, R_PASS_PEER_CONNECTION, &status) < 0))
		status = MTRANSMITERR;

	if (status != MOK) {
		detailed_error(svc,
		    gettext("failed to send peer connection: %s"),
		    lpsched_status_string(status));
		return (lpsched_status_to_papi_status(status));
	}

	return (PAPI_OK);
}
/*
 * Fetch the peer's sensitivity label from the connection on "fd" and
 * store a heap-allocated copy in *slabel (replacing and freeing any
 * previous value).  On an unlabeled system this is a no-op returning 0.
 * Returns 0 on success, -1 (with errno set) on bad arguments or if the
 * peer ucred cannot be obtained.
 */
int
get_peer_label(int fd, char **slabel)
{
	if (is_system_labeled()) {
		ucred_t *uc = NULL;
		m_label_t *sl;
		char *pslabel = NULL;	/* peer's slabel */

		if ((fd < 0) || (slabel == NULL)) {
			errno = EINVAL;
			return (-1);
		}

		if (getpeerucred(fd, &uc) == -1)
			return (-1);

		sl = ucred_getlabel(uc);
		if (label_to_str(sl, &pslabel, M_INTERNAL, DEF_NAMES) != 0)
			syslog(LOG_WARNING, "label_to_str(): %m");
		ucred_free(uc);

		if (pslabel != NULL) {
			syslog(LOG_DEBUG, "get_peer_label(%d, %s): becomes %s",
			    fd, (*slabel ? *slabel : "NULL"), pslabel);
			if (*slabel != NULL)
				free(*slabel);
			*slabel = strdup(pslabel);
			/*
			 * label_to_str() allocates the string; it was
			 * previously leaked here.  Release our copy now
			 * that it has been duplicated into *slabel.
			 */
			free(pslabel);
		}
	}

	return (0);
}
/*
 * Emit a BSM audit record for an ftpd logout: subject token, optional
 * sensitivity label (on labeled systems), and a success return token.
 * PRIV_PROC_AUDIT is raised only for the duration of the call.
 */
void
audit_ftpd_logout(void)
{
	int	rd;		/* audit record descriptor */
	uid_t	euid;
	gid_t	egid;
	uid_t	uid;
	gid_t	gid;
	pid_t	pid;
	struct auditinfo_addr	info;

	/* Nothing to do if auditing is disabled. */
	if (cannot_audit(0)) {
		return;
	}

	(void) priv_set(PRIV_ON, PRIV_EFFECTIVE, PRIV_PROC_AUDIT, NULL);

	/* see if terminal id already set */
	if (getaudit_addr(&info, sizeof (info)) < 0) {
		perror("getaudit");
	}

	/* determine if we're preselected */
	if (au_preselect(AUE_ftpd_logout, &info.ai_mask, AU_PRS_SUCCESS,
	    AU_PRS_USECACHE) == 0) {
		/* Not preselected: drop the privilege and bail. */
		(void) priv_set(PRIV_OFF, PRIV_EFFECTIVE, PRIV_PROC_AUDIT,
		    NULL);
		return;
	}

	euid = geteuid();
	egid = getegid();
	uid = getuid();
	gid = getgid();
	pid = getpid();

	rd = au_open();

	/* add subject token */
	(void) au_write(rd, au_to_subject_ex(info.ai_auid, euid, egid,
	    uid, gid, pid, pid, &info.ai_termid));

	/* Labeled systems also record the process label. */
	if (is_system_labeled())
		(void) au_write(rd, au_to_mylabel());

	/* add return token */
	errno = 0;
#ifdef _LP64
	(void) au_write(rd, au_to_return64(0, (int64_t)0));
#else
	(void) au_write(rd, au_to_return32(0, (int32_t)0));
#endif

	/* write audit record; on failure, discard it instead */
	if (au_close(rd, 1, AUE_ftpd_logout) < 0) {
		(void) au_close(rd, 0, 0);
	}

	(void) priv_set(PRIV_OFF, PRIV_EFFECTIVE, PRIV_PROC_AUDIT, NULL);
}
/*
 * Return a pointer to the sensitivity label embedded in the ucred, or
 * NULL (errno = EINVAL) if the system is unlabeled or the ucred carries
 * no label.
 */
bslabel_t *
ucred_getlabel(const ucred_t *uc)
{
	/* LINTED: alignment */
	bslabel_t *label = UCLABEL(uc);

	if (label == NULL || !is_system_labeled()) {
		errno = EINVAL;
		return (NULL);
	}

	return (label);
}
/*
 * Initialize an smb_cred from "cr" (or the DDI default cred when cr is
 * NULL).  On labeled systems a private duplicate is made so that the
 * NET_MAC_AWARE flag can be set without affecting the caller's cred;
 * otherwise the cred is simply held.
 */
void
smb_credinit(struct smb_cred *scred, cred_t *cr)
{
	/* cr arg is optional */
	if (cr == NULL)
		cr = ddi_get_cred();

	if (!is_system_labeled()) {
		crhold(cr);
	} else {
		/* Private copy so we can flip NET_MAC_AWARE. */
		cr = crdup(cr);
		(void) setpflags(NET_MAC_AWARE, 1, cr);
	}

	scred->scr_cred = cr;
}
/*
 * Validate a label string on a labeled system.  Returns NULL when the
 * string is acceptable (or the system is unlabeled), otherwise returns
 * the offending string itself so the caller can report it.
 */
static const char *
check_label(const char *labelstr)
{
	m_label_t *lbl = NULL;
	int rc;

	if (!is_system_labeled())
		return (NULL);

	rc = str_to_label(labelstr, &lbl, MAC_LABEL, L_NO_CORRECTION, NULL);
	m_label_free(lbl);

	return (rc == -1 ? labelstr : NULL);
}
/*
 * Convert a credential into a "ucred". Allow the caller to specify
 * and aligned buffer, e.g., in an mblk, so we don't have to allocate
 * memory and copy it twice.
 *
 * This function may call cred2ucaud(), which calls CRED(). Since this
 * can be called from an interrupt thread, receiver's cred (rcr) is needed
 * to determine whether audit info should be included.
 *
 * Returns a pointer to the populated ucred (either "buf" or a freshly
 * kmem_zalloc'd buffer of ucredminsize(cr) bytes when buf is NULL).
 */
struct ucred_s *
cred2ucred(const cred_t *cr, pid_t pid, void *buf, const cred_t *rcr)
{
	struct ucred_s *uc;
	uint32_t realsz = ucredminsize(cr);
	/* Label only carried on labeled systems. */
	ts_label_t *tslp = is_system_labeled() ? crgetlabel(cr) : NULL;

	/* The structure isn't always completely filled in, so zero it */
	if (buf == NULL) {
		uc = kmem_zalloc(realsz, KM_SLEEP);
	} else {
		bzero(buf, realsz);
		uc = buf;
	}
	uc->uc_size = realsz;
	uc->uc_pid = pid;
	uc->uc_projid = cr->cr_projid;
	uc->uc_zoneid = crgetzoneid(cr);

	if (REMOTE_PEER_CRED(cr)) {
		/*
		 * Other than label, the rest of cred info about a
		 * remote peer isn't available. Copy the label directly
		 * after the header where we generally copy the prcred.
		 * That's why we use sizeof (struct ucred_s). The other
		 * offset fields are initialized to 0.
		 */
		uc->uc_labeloff = tslp == NULL ? 0 : sizeof (struct ucred_s);
	} else {
		/* Fixed layout offsets for the local-cred case. */
		uc->uc_credoff = UCRED_CRED_OFF;
		uc->uc_privoff = UCRED_PRIV_OFF;
		uc->uc_audoff = UCRED_AUD_OFF;
		uc->uc_labeloff = tslp == NULL ? 0 : UCRED_LABEL_OFF;

		cred2prcred(cr, UCCRED(uc));
		cred2prpriv(cr, UCPRIV(uc));
		/*
		 * NOTE(review): "audoff" appears to be a file-scope value
		 * set elsewhere in this file; when it is 0, or audit
		 * conversion fails, the audit section is marked absent —
		 * confirm against the full source.
		 */
		if (audoff == 0 || cred2ucaud(cr, UCAUD(uc), rcr) != 0)
			uc->uc_audoff = 0;
	}
	if (tslp != NULL)
		bcopy(&tslp->tsl_label, UCLABEL(uc), sizeof (bslabel_t));

	return (uc);
}
/*
 * _daalloc -
 *	allocates common buffers and structures.
 *	returns pointer to the new structure, else returns NULL on error.
 *
 * The singleton __dabuff (and its companion globals) are initialized on
 * the first call; subsequent calls return the existing buffer.
 */
static struct _dabuff *
_daalloc(void)
{
	struct _dabuff *_da = __dabuff;

	if (_da == NULL) {
		/*
		 * calloc() zero-fills the buffer; the previous casts on
		 * its arguments and return value were redundant in C and
		 * have been dropped.
		 */
		_da = calloc(1, sizeof (*__dabuff));
		if (_da == NULL)
			return (NULL);
		DEVALLOC_FILE = "/etc/security/device_allocate";
		daf = NULL;
		__dabuff = _da;
		system_labeled = is_system_labeled();
	}

	return (__dabuff);
}
/*
 * Don't let port fall into the privileged range.
 * Since the extra privileged ports can be arbitrary we also
 * ensure that we exclude those from consideration.
 * sctp_g_epriv_ports is not sorted thus we loop over it until
 * there are no changes.
 *
 * Note: No locks are held when inspecting sctp_g_*epriv_ports
 * but instead the code relies on:
 * - the fact that the address of the array and its size never changes
 * - the atomic assignment of the elements of the array
 *
 * Returns the next usable anonymous port at or above "port", or 0 if
 * the anonymous range has been exhausted (one full wrap allowed).
 */
in_port_t
sctp_update_next_port(in_port_t port, zone_t *zone)
{
	int i;
	boolean_t restart = B_FALSE;	/* true after wrapping once */

retry:
	/* Clamp into the anonymous port range. */
	if (port < sctp_smallest_anon_port)
		port = sctp_smallest_anon_port;

	if (port > sctp_largest_anon_port) {
		/* Second wrap means nothing is available. */
		if (restart)
			return (0);
		restart = B_TRUE;
		port = sctp_smallest_anon_port;
	}

	if (port < sctp_smallest_nonpriv_port)
		port = sctp_smallest_nonpriv_port;

	for (i = 0; i < sctp_g_num_epriv_ports; i++) {
		if (port == sctp_g_epriv_ports[i]) {
			port++;
			/*
			 * Make sure whether the port is in the
			 * valid range.
			 *
			 * XXX Note that if sctp_g_epriv_ports contains
			 * all the anonymous ports this will be an
			 * infinite loop.
			 */
			goto retry;
		}
	}

	/* On labeled systems, let the MLP code veto/advance the port. */
	if (is_system_labeled() &&
	    (i = tsol_next_port(zone, port, IPPROTO_SCTP, B_TRUE)) != 0) {
		port = i;
		goto retry;
	}

	return (port);
}
/* * Don't allocate the non-needed group entries. Note: this function * must match the code in cred2ucred; they must agree about the * minimal size of the ucred. */ uint32_t ucredminsize(const cred_t *cr) { int ndiff; if (cr == NULL) return (ucredsize); if (REMOTE_PEER_CRED(cr)) { if (is_system_labeled()) return (sizeof (struct ucred_s) + sizeof (bslabel_t)); else return (sizeof (struct ucred_s)); } if (cr->cr_grps == NULL) ndiff = ngroups_max - 1; /* Needs one for prcred_t */ else ndiff = ngroups_max - cr->cr_grps->crg_ngroups; return (ucredsize - ndiff * sizeof (gid_t)); }
/*
 * tcp_input_data() calls this routine for all packet destined to a
 * connection to the SSL port, when the SSL kernel proxy is configured
 * to intercept and process those packets.
 * A packet may carry multiple SSL records, so the function
 * calls kssl_input() in a loop, until all records are
 * handled.
 * As long as this connection is in handshake, that is until the first
 * time kssl_input() returns a record to be delivered upstream,
 * we maintain the tcp_kssl_inhandshake, and keep an extra reference on
 * the tcp/connp across the call to kssl_input(). The reason is, that
 * function may return KSSL_CMD_QUEUED after scheduling an asynchronous
 * request and cause tcp_kssl_callback() to be called on a different CPU,
 * which could decrement the conn/tcp reference before we get to increment it.
 */
void
tcp_kssl_input(tcp_t *tcp, mblk_t *mp, cred_t *cr)
{
	struct conn_s	*connp = tcp->tcp_connp;
	tcp_t		*listener;
	mblk_t		*ind_mp;
	kssl_cmd_t	kssl_cmd;
	mblk_t		*outmp;
	struct T_conn_ind	*tci;
	boolean_t	more = B_FALSE;
	boolean_t	conn_held = B_FALSE;
	boolean_t	is_v4;
	void		*addr;

	if (is_system_labeled() && mp != NULL) {
		ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
		/*
		 * Provide for protocols above TCP such as RPC. NOPID leaves
		 * db_cpid unchanged.
		 * The cred could have already been set.
		 */
		if (cr != NULL)
			mblk_setcred(mp, cr, NOPID);
	}

	/* First time here, allocate the SSL context */
	if (tcp->tcp_kssl_ctx == NULL) {
		ASSERT(tcp->tcp_kssl_pending);

		is_v4 = (connp->conn_ipversion == IPV4_VERSION);
		if (is_v4) {
			addr = &connp->conn_faddr_v4;
		} else {
			addr = &connp->conn_faddr_v6;
		}

		/* Failure here means fall back to userland SSL. */
		if (kssl_init_context(tcp->tcp_kssl_ent, addr, is_v4,
		    tcp->tcp_mss, &(tcp->tcp_kssl_ctx)) != KSSL_STS_OK) {
			tcp->tcp_kssl_pending = B_FALSE;
			kssl_release_ent(tcp->tcp_kssl_ent, NULL,
			    KSSL_NO_PROXY);
			tcp->tcp_kssl_ent = NULL;
			goto no_can_do;
		}
		tcp->tcp_kssl_inhandshake = B_TRUE;

		/* we won't be needing this one after now */
		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
		tcp->tcp_kssl_ent = NULL;
	}

	/* Extra ref during handshake; see block comment above. */
	if (tcp->tcp_kssl_inhandshake) {
		CONN_INC_REF(connp);
		conn_held = B_TRUE;
	}

	do {
		kssl_cmd = kssl_input(tcp->tcp_kssl_ctx, mp, &outmp,
		    &more, tcp_kssl_input_callback, (void *)tcp);

		switch (kssl_cmd) {
		case KSSL_CMD_SEND:
			DTRACE_PROBE(kssl_cmd_send);
			/*
			 * We need to increment tcp_squeue_bytes to account
			 * for the extra bytes internally injected to the
			 * outgoing flow. tcp_output() will decrement it
			 * as they are sent out.
			 */
			mutex_enter(&tcp->tcp_non_sq_lock);
			tcp->tcp_squeue_bytes += msgdsize(outmp);
			mutex_exit(&tcp->tcp_non_sq_lock);
			tcp_output(connp, outmp, NULL, NULL);

		/* FALLTHROUGH */
		case KSSL_CMD_NONE:
			DTRACE_PROBE(kssl_cmd_none);
			if (tcp->tcp_kssl_pending) {
				mblk_t *ctxmp;

				/*
				 * SSL handshake successfully started -
				 * pass up the T_CONN_IND
				 */
				mp = NULL;

				listener = tcp->tcp_listener;
				tcp->tcp_kssl_pending = B_FALSE;

				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				ctxmp = allocb(sizeof (kssl_ctx_t), BPRI_MED);

				/*
				 * Give this session a chance to fall back to
				 * userland SSL
				 */
				if (ctxmp == NULL)
					goto no_can_do;

				/*
				 * attach the kssl_ctx to the conn_ind and
				 * transform it to a T_SSL_PROXY_CONN_IND.
				 * Hold it so that it stays valid till it
				 * reaches the stream head.
				 */
				kssl_hold_ctx(tcp->tcp_kssl_ctx);
				*((kssl_ctx_t *)ctxmp->b_rptr) =
				    tcp->tcp_kssl_ctx;
				ctxmp->b_wptr = ctxmp->b_rptr +
				    sizeof (kssl_ctx_t);
				ind_mp->b_cont = ctxmp;

				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from tcp_input_data
				 * delivering the T_CONN_IND on a TCPS_SYN_RCVD,
				 * and all conn ref cnt comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;

				CONN_INC_REF(connp);

				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL,
					    SQ_FILL, SQTAG_TCP_CONN_IND);
				}
			}
			break;

		case KSSL_CMD_QUEUED:
			DTRACE_PROBE(kssl_cmd_queued);
			/*
			 * We hold the conn_t here because an asynchronous
			 * request have been queued and
			 * tcp_kssl_input_callback() will be called later.
			 * It will release the conn_t
			 */
			CONN_INC_REF(connp);
			break;

		case KSSL_CMD_DELIVER_PROXY:
		case KSSL_CMD_DELIVER_SSL:
			DTRACE_PROBE(kssl_cmd_proxy__ssl);
			/*
			 * Keep accumulating if not yet accepted.
			 */
			if (tcp->tcp_listener != NULL) {
				DTRACE_PROBE1(kssl_mblk__input_rcv_enqueue,
				    mblk_t *, outmp);
				tcp_rcv_enqueue(tcp, outmp, msgdsize(outmp),
				    NULL);
			} else {
				DTRACE_PROBE1(kssl_mblk__input_putnext,
				    mblk_t *, outmp);
				putnext(connp->conn_rq, outmp);
			}
			/*
			 * We're at a phase where records are sent upstream,
			 * past the handshake
			 */
			tcp->tcp_kssl_inhandshake = B_FALSE;
			break;

		case KSSL_CMD_NOT_SUPPORTED:
			DTRACE_PROBE(kssl_cmd_not_supported);
			/*
			 * Stop the SSL processing by the proxy, and
			 * switch to the userland SSL
			 */
			if (tcp->tcp_kssl_pending) {

				tcp->tcp_kssl_pending = B_FALSE;

no_can_do:
				DTRACE_PROBE1(kssl_no_can_do, tcp_t *, tcp);
				listener = tcp->tcp_listener;
				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				if (tcp->tcp_kssl_ctx != NULL) {
					kssl_release_ctx(tcp->tcp_kssl_ctx);
					tcp->tcp_kssl_ctx = NULL;
				}

				/*
				 * Make this a T_SSL_PROXY_CONN_IND, for the
				 * stream head to deliver it to the SSL
				 * fall-back listener
				 */
				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from tcp_input_data
				 * delivering the T_CONN_IND on a TCPS_SYN_RCVD,
				 * and all conn ref cnt comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;
				CONN_INC_REF(connp);

				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL,
					    SQ_FILL, SQTAG_TCP_CONN_IND);
				}
			}
			/* Queue the raw data for the userland SSL. */
			if (mp != NULL)
				tcp_rcv_enqueue(tcp, mp, msgdsize(mp), NULL);
			break;
		}
		/* mp is consumed on the first pass; loop drains the rest. */
		mp = NULL;
	} while (more);

	if (conn_held) {
		CONN_DEC_REF(connp);
	}
}
/*
 * Returns 0 on success, ENOMEM on memory allocation failure, EHOSTUNREACH
 * if the connection credentials fail remote host accreditation or
 * if the new destination does not support the previously established
 * connection security label. If sleep is true, this function should
 * never fail for a memory allocation failure. The boolean parameter
 * "first" decides whether the newly created faddr structure should be
 * added at the beginning of the list or at the end.
 *
 * Note: caller must hold conn fanout lock.
 */
int
sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
{
	sctp_faddr_t	*faddr;
	mblk_t		*timer_mp;
	int		err;
	conn_t		*connp = sctp->sctp_connp;

	if (is_system_labeled()) {
		ip_xmit_attr_t	*ixa = connp->conn_ixa;
		ts_label_t	*effective_tsl = NULL;

		ASSERT(ixa->ixa_tsl != NULL);

		/*
		 * Verify the destination is allowed to receive packets
		 * at the security label of the connection we are initiating.
		 *
		 * tsol_check_dest() will create a new effective label for
		 * this connection with a modified label or label flags only
		 * if there are changes from the original label.
		 *
		 * Accept whatever label we get if this is the first
		 * destination address for this connection. The security
		 * label and label flags must match any previous settings
		 * for all subsequent destination addresses.
		 */
		if (IN6_IS_ADDR_V4MAPPED(addr)) {
			uint32_t dst;

			IN6_V4MAPPED_TO_IPADDR(addr, dst);
			err = tsol_check_dest(ixa->ixa_tsl, &dst,
			    IPV4_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		} else {
			err = tsol_check_dest(ixa->ixa_tsl, addr,
			    IPV6_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		}
		if (err != 0)
			return (err);

		/*
		 * First address: adopt the (possibly modified) effective
		 * label.  Later addresses: any required label change means
		 * the destination is incompatible with the association.
		 */
		if (sctp->sctp_faddrs == NULL && effective_tsl != NULL) {
			ip_xmit_attr_replace_tsl(ixa, effective_tsl);
		} else if (effective_tsl != NULL) {
			label_rele(effective_tsl);
			return (EHOSTUNREACH);
		}
	}

	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
		return (ENOMEM);
	bzero(faddr, sizeof (*faddr));
	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep);
	if (timer_mp == NULL) {
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;

	/* Start with any options set on the conn */
	faddr->ixa = conn_get_ixa_exclusive(connp);
	if (faddr->ixa == NULL) {
		freemsg(timer_mp);
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	faddr->ixa->ixa_notify_cookie = connp->conn_sctp;

	sctp_init_faddr(sctp, faddr, addr, timer_mp);
	ASSERT(faddr->ixa->ixa_cred != NULL);

	/* ip_attr_connect didn't allow broadcast/multicast dest */
	ASSERT(faddr->next == NULL);

	/* Link into the peer-address list per "first". */
	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		/* only element on list; first and last are same */
		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
	} else if (first) {
		ASSERT(sctp->sctp_lastfaddr != NULL);
		faddr->next = sctp->sctp_faddrs;
		sctp->sctp_faddrs = faddr;
	} else {
		sctp->sctp_lastfaddr->next = faddr;
		sctp->sctp_lastfaddr = faddr;
	}
	sctp->sctp_nfaddrs++;

	return (0);
}
/*
 * Connect to a peer - this function inserts the sctp in the
 * bind and conn fanouts, sends the INIT, and replies to the client
 * with an OK ack.
 */
int
sctp_connect(sctp_t *sctp, const struct sockaddr *dst, uint32_t addrlen,
    cred_t *cr, pid_t pid)
{
	sin_t		*sin;
	sin6_t		*sin6;
	in6_addr_t	dstaddr;
	in_port_t	dstport;
	mblk_t		*initmp;
	sctp_tf_t	*tbf;
	sctp_t		*lsctp;
	char		buf[INET6_ADDRSTRLEN];
	int		sleep = sctp->sctp_cansleep ? KM_SLEEP : KM_NOSLEEP;
	int		err;
	sctp_faddr_t	*cur_fp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	conn_t		*connp = sctp->sctp_connp;
	uint_t		scope_id = 0;
	ip_xmit_attr_t	*ixa;

	/*
	 * Determine packet type based on type of address passed in
	 * the request should contain an IPv4 or IPv6 address.
	 * Make sure that address family matches the type of
	 * family of the address passed down.
	 */
	if (addrlen < sizeof (sin_t)) {
		return (EINVAL);
	}
	switch (dst->sa_family) {
	case AF_INET:
		sin = (sin_t *)dst;
		/* Check for attempt to connect to non-unicast */
		if (CLASSD(sin->sin_addr.s_addr) ||
		    (sin->sin_addr.s_addr == INADDR_BROADCAST)) {
			ip0dbg(("sctp_connect: non-unicast\n"));
			return (EINVAL);
		}
		if (connp->conn_ipv6_v6only)
			return (EAFNOSUPPORT);

		/* convert to v6 mapped */
		/* Check for attempt to connect to INADDR_ANY */
		if (sin->sin_addr.s_addr == INADDR_ANY) {
			struct in_addr v4_addr;
			/*
			 * SunOS 4.x and 4.3 BSD allow an application
			 * to connect a TCP socket to INADDR_ANY.
			 * When they do this, the kernel picks the
			 * address of one interface and uses it
			 * instead. The kernel usually ends up
			 * picking the address of the loopback
			 * interface. This is an undocumented feature.
			 * However, we provide the same thing here
			 * in case any TCP apps that use this feature
			 * are being ported to SCTP...
			 */
			v4_addr.s_addr = htonl(INADDR_LOOPBACK);
			IN6_INADDR_TO_V4MAPPED(&v4_addr, &dstaddr);
		} else {
			IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &dstaddr);
		}
		dstport = sin->sin_port;
		break;
	case AF_INET6:
		sin6 = (sin6_t *)dst;
		/* Check for attempt to connect to non-unicast. */
		if ((addrlen < sizeof (sin6_t)) ||
		    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			ip0dbg(("sctp_connect: non-unicast\n"));
			return (EINVAL);
		}
		if (connp->conn_ipv6_v6only &&
		    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			return (EAFNOSUPPORT);
		}
		/* check for attempt to connect to unspec */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			dstaddr = ipv6_loopback;
		} else {
			dstaddr = sin6->sin6_addr;
			if (IN6_IS_ADDR_LINKLOCAL(&dstaddr)) {
				sctp->sctp_linklocal = 1;
				scope_id = sin6->sin6_scope_id;
			}
		}
		dstport = sin6->sin6_port;
		connp->conn_flowinfo = sin6->sin6_flowinfo;
		break;
	default:
		dprint(1, ("sctp_connect: unknown family %d\n",
		    dst->sa_family));
		return (EAFNOSUPPORT);
	}

	(void) inet_ntop(AF_INET6, &dstaddr, buf, sizeof (buf));
	dprint(1, ("sctp_connect: attempting connect to %s...\n", buf));

	RUN_SCTP(sctp);

	if (connp->conn_family != dst->sa_family ||
	    (connp->conn_state_flags & CONN_CLOSING)) {
		WAKE_SCTP(sctp);
		return (EINVAL);
	}

	/* We update our cred/cpid based on the caller of connect */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in conn_ixa without any refhold */
	ixa = connp->conn_ixa;
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	switch (sctp->sctp_state) {
	case SCTPS_IDLE: {
		struct sockaddr_storage	ss;

		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to COOKIE_WAIT.
		 * sctp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state. SCTP
		 * picks and uses what it considers the optimal local address
		 * set (just like specifying INADDR_ANY to bind()).
		 */
		dprint(1, ("sctp_connect: idle, attempting bind...\n"));
		ASSERT(sctp->sctp_nsaddrs == 0);

		bzero(&ss, sizeof (ss));
		ss.ss_family = connp->conn_family;
		WAKE_SCTP(sctp);
		if ((err = sctp_bind(sctp, (struct sockaddr *)&ss,
		    sizeof (ss))) != 0) {
			return (err);
		}
		RUN_SCTP(sctp);
		/* FALLTHRU */
	}
	case SCTPS_BOUND:
		ASSERT(sctp->sctp_nsaddrs > 0);

		/* do the connect */
		/* XXX check for attempt to connect to self */
		connp->conn_fport = dstport;

		ASSERT(sctp->sctp_iphc);
		ASSERT(sctp->sctp_iphc6);

		/*
		 * Don't allow this connection to completely duplicate
		 * an existing connection.
		 *
		 * Ensure that the duplicate check and insertion is atomic.
		 */
		sctp_conn_hash_remove(sctp);
		tbf = &sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps,
		    connp->conn_ports)];
		mutex_enter(&tbf->tf_lock);
		lsctp = sctp_lookup(sctp, &dstaddr, tbf, &connp->conn_ports,
		    SCTPS_COOKIE_WAIT);
		if (lsctp != NULL) {
			/* found a duplicate connection */
			mutex_exit(&tbf->tf_lock);
			SCTP_REFRELE(lsctp);
			WAKE_SCTP(sctp);
			return (EADDRINUSE);
		}

		/*
		 * OK; set up the peer addr (this may grow after we get
		 * the INIT ACK from the peer with additional addresses).
		 */
		if ((err = sctp_add_faddr(sctp, &dstaddr, sleep,
		    B_FALSE)) != 0) {
			mutex_exit(&tbf->tf_lock);
			WAKE_SCTP(sctp);
			return (err);
		}
		cur_fp = sctp->sctp_faddrs;
		ASSERT(cur_fp->ixa != NULL);

		/* No valid src addr, return. */
		if (cur_fp->state == SCTP_FADDRS_UNREACH) {
			mutex_exit(&tbf->tf_lock);
			WAKE_SCTP(sctp);
			return (EADDRNOTAVAIL);
		}

		sctp->sctp_primary = cur_fp;
		sctp->sctp_current = cur_fp;
		sctp->sctp_mss = cur_fp->sfa_pmss;
		sctp_conn_hash_insert(tbf, sctp, 1);
		mutex_exit(&tbf->tf_lock);

		ixa = cur_fp->ixa;
		ASSERT(ixa->ixa_cred != NULL);

		if (scope_id != 0) {
			ixa->ixa_flags |= IXAF_SCOPEID_SET;
			ixa->ixa_scopeid = scope_id;
		} else {
			ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
		}

		/* initialize composite headers */
		if ((err = sctp_set_hdraddrs(sctp)) != 0) {
			sctp_conn_hash_remove(sctp);
			WAKE_SCTP(sctp);
			return (err);
		}

		if ((err = sctp_build_hdrs(sctp, KM_SLEEP)) != 0) {
			sctp_conn_hash_remove(sctp);
			WAKE_SCTP(sctp);
			return (err);
		}

		/*
		 * Turn off the don't fragment bit on the (only) faddr,
		 * so that if one of the messages exchanged during the
		 * initialization sequence exceeds the path mtu, it
		 * at least has a chance to get there. SCTP does no
		 * fragmentation of initialization messages.  The DF bit
		 * will be turned on again in sctp_send_cookie_echo()
		 * (but the cookie echo will still be sent with the df bit
		 * off).
		 */
		cur_fp->df = B_FALSE;

		/* Mark this address as alive */
		cur_fp->state = SCTP_FADDRS_ALIVE;

		/* Send the INIT to the peer */
		SCTP_FADDR_TIMER_RESTART(sctp, cur_fp, cur_fp->rto);
		sctp->sctp_state = SCTPS_COOKIE_WAIT;
		/*
		 * sctp_init_mp() could result in modifying the source
		 * address list, so take the hash lock.
		 */
		mutex_enter(&tbf->tf_lock);
		initmp = sctp_init_mp(sctp, cur_fp);
		if (initmp == NULL) {
			mutex_exit(&tbf->tf_lock);
			/*
			 * It may happen that all the source addresses
			 * (loopback/link local) are removed.  In that case,
			 * fail the connect.
			 */
			if (sctp->sctp_nsaddrs == 0) {
				sctp_conn_hash_remove(sctp);
				SCTP_FADDR_TIMER_STOP(cur_fp);
				WAKE_SCTP(sctp);
				return (EADDRNOTAVAIL);
			}

			/* Otherwise, let the retransmission timer retry */
			WAKE_SCTP(sctp);
			goto notify_ulp;
		}
		mutex_exit(&tbf->tf_lock);

		/*
		 * On a clustered node send this notification to the
		 * clustering subsystem.
		 */
		if (cl_sctp_connect != NULL) {
			uchar_t		*slist;
			uchar_t		*flist;
			size_t		ssize;
			size_t		fsize;

			fsize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
			ssize = sizeof (in6_addr_t) * sctp->sctp_nsaddrs;
			slist = kmem_alloc(ssize, KM_SLEEP);
			flist = kmem_alloc(fsize, KM_SLEEP);
			/* The clustering module frees the lists */
			sctp_get_saddr_list(sctp, slist, ssize);
			sctp_get_faddr_list(sctp, flist, fsize);
			(*cl_sctp_connect)(connp->conn_family, slist,
			    sctp->sctp_nsaddrs, connp->conn_lport,
			    flist, sctp->sctp_nfaddrs, connp->conn_fport,
			    B_TRUE, (cl_sctp_handle_t)sctp);
		}
		ASSERT(ixa->ixa_cred != NULL);
		ASSERT(ixa->ixa_ire != NULL);

		(void) conn_ip_output(initmp, ixa);
		BUMP_LOCAL(sctp->sctp_opkts);
		WAKE_SCTP(sctp);

notify_ulp:
		sctp_set_ulp_prop(sctp);

		return (0);
	default:
		ip0dbg(("sctp_connect: invalid state. %d\n",
		    sctp->sctp_state));
		WAKE_SCTP(sctp);
		return (EINVAL);
	}
}
/*
 * Open a transport endpoint for "nconf", bind it to the address of the
 * host/service in "hs" (or to an anonymous address for nfs4_callback),
 * and return the bound fd; *addr is set to the actual bound address.
 * Returns -1 on any failure (logged via syslog).
 */
int
nfslib_bindit(struct netconfig *nconf, struct netbuf **addr,
	struct nd_hostserv *hs, int backlog)
{
	int fd;
	struct t_bind  *ntb;
	struct t_bind tb;
	struct nd_addrlist *addrlist;
	struct t_optmgmt req, resp;
	struct opthdr *opt;
	char reqbuf[128];
	bool_t use_any = FALSE;		/* bind to an anonymous address */
	bool_t gzone = TRUE;

	if ((fd = nfslib_transport_open(nconf)) == -1) {
		syslog(LOG_ERR, "cannot establish transport service over %s",
		    nconf->nc_device);
		return (-1);
	}

	addrlist = (struct nd_addrlist *)NULL;

	/* nfs4_callback service does not use a fixed port number */
	if (strcmp(hs->h_serv, "nfs4_callback") == 0) {
		tb.addr.maxlen = 0;
		tb.addr.len = 0;
		tb.addr.buf = 0;
		use_any = TRUE;
		gzone = (getzoneid() == GLOBAL_ZONEID);
	} else if (netdir_getbyname(nconf, hs, &addrlist) != 0) {
		syslog(LOG_ERR,
		    "Cannot get address for transport %s host %s service %s",
		    nconf->nc_netid, hs->h_host, hs->h_serv);
		(void) t_close(fd);
		return (-1);
	}

	if (strcmp(nconf->nc_proto, "tcp") == 0) {
		/*
		 * If we're running over TCP, then set the
		 * SO_REUSEADDR option so that we can bind
		 * to our preferred address even if previously
		 * left connections exist in FIN_WAIT states.
		 * This is somewhat bogus, but otherwise you have
		 * to wait 2 minutes to restart after killing it.
		 */
		if (reuseaddr(fd) == -1) {
			syslog(LOG_WARNING,
			    "couldn't set SO_REUSEADDR option on transport");
		}
	} else if (strcmp(nconf->nc_proto, "udp") == 0) {
		/*
		 * In order to run MLP on UDP, we need to handle creds.
		 */
		if (recvucred(fd) == -1) {
			syslog(LOG_WARNING,
			    "couldn't set SO_RECVUCRED option on transport");
		}
	}

	/*
	 * Make non global zone nfs4_callback port MLP
	 */
	if (use_any && is_system_labeled() && !gzone) {
		if (anonmlp(fd) == -1) {
			/*
			 * failing to set this option means nfs4_callback
			 * could fail silently later. So fail it with
			 * an error message now.
			 */
			syslog(LOG_ERR,
			    "couldn't set SO_ANON_MLP option on transport");
			(void) t_close(fd);
			return (-1);
		}
	}

	/* Connection-oriented transports get a listen backlog. */
	if (nconf->nc_semantics == NC_TPI_CLTS)
		tb.qlen = 0;
	else
		tb.qlen = backlog;

	/* LINTED pointer alignment */
	ntb = (struct t_bind *)t_alloc(fd, T_BIND, T_ALL);
	if (ntb == (struct t_bind *)NULL) {
		syslog(LOG_ERR, "t_alloc failed: t_errno %d, %m", t_errno);
		(void) t_close(fd);
		netdir_free((void *)addrlist, ND_ADDRLIST);
		return (-1);
	}

	/*
	 * XXX - what about the space tb->addr.buf points to? This should
	 * be either a memcpy() to/from the buf fields, or t_alloc(fd,T_BIND,)
	 * shouldn't be called with T_ALL.
	 */
	if (addrlist)
		tb.addr = *(addrlist->n_addrs);		/* structure copy */

	if (t_bind(fd, &tb, ntb) == -1) {
		syslog(LOG_ERR, "t_bind failed: t_errno %d, %m", t_errno);
		(void) t_free((char *)ntb, T_BIND);
		netdir_free((void *)addrlist, ND_ADDRLIST);
		(void) t_close(fd);
		return (-1);
	}

	/* make sure we bound to the right address */
	if (use_any == FALSE &&
	    (tb.addr.len != ntb->addr.len ||
	    memcmp(tb.addr.buf, ntb->addr.buf, tb.addr.len) != 0)) {
		syslog(LOG_ERR, "t_bind to wrong address");
		(void) t_free((char *)ntb, T_BIND);
		netdir_free((void *)addrlist, ND_ADDRLIST);
		(void) t_close(fd);
		return (-1);
	}

	/*
	 * Call nfs4svc_setport so that the kernel can be
	 * informed what port number the daemon is listening on
	 * for incoming connection requests.
	 */
	if ((nconf->nc_semantics == NC_TPI_COTS ||
	    nconf->nc_semantics == NC_TPI_COTS_ORD) && Mysvc4 != NULL)
		(*Mysvc4)(fd, NULL, nconf, NFS4_SETPORT, &ntb->addr);

	*addr = &ntb->addr;
	netdir_free((void *)addrlist, ND_ADDRLIST);

	if (strcmp(nconf->nc_proto, "tcp") == 0) {
		/*
		 * Disable the Nagle algorithm on TCP connections.
		 * Connections accepted from this listener will
		 * inherit the listener options.
		 */
		/* LINTED pointer alignment */
		opt = (struct opthdr *)reqbuf;
		opt->level = IPPROTO_TCP;
		opt->name = TCP_NODELAY;
		opt->len = sizeof (int);

		/* LINTED pointer alignment */
		*(int *)((char *)opt + sizeof (*opt)) = 1;

		req.flags = T_NEGOTIATE;
		req.opt.len = sizeof (*opt) + opt->len;
		req.opt.buf = (char *)opt;
		resp.flags = 0;
		resp.opt.buf = reqbuf;
		resp.opt.maxlen = sizeof (reqbuf);

		if (t_optmgmt(fd, &req, &resp) < 0 ||
		    resp.flags != T_SUCCESS) {
			syslog(LOG_ERR,
	"couldn't set NODELAY option for proto %s: t_errno = %d, %m",
			    nconf->nc_proto, t_errno);
		}

		nfslib_set_sockbuf(fd);
	}

	return (fd);
}
/*
 * s_inquire_request_rank: answer an S_INQUIRE_REQUEST_RANK message by
 * walking the global Request_List and streaming back one
 * R_INQUIRE_REQUEST_RANK reply per matching request (MOKMORE for all but
 * the last match, MOK for the last, MNOINFO if nothing matched).
 * Empty filter strings (form/dest/req_id/user/pwheel) act as wildcards.
 * As a side effect, recomputes each printer's pending-request count,
 * which doubles as the per-request queue rank reported to the client.
 */
void
s_inquire_request_rank(char *m, MESG *md)
{
	char *form;
	char *dest;
	char *pwheel;
	char *user;
	char *req_id;
	RSTATUS *rp;
	RSTATUS *found = NULL;	/* previous match, reported one iteration late */
	int found_rank = 0;
	short prop;
	char files[BUFSIZ];
	int i;

	(void) getmessage(m, S_INQUIRE_REQUEST_RANK, &prop, &form, &dest,
	    &req_id, &user, &pwheel);
	syslog(LOG_DEBUG, "s_inquire_request_rank(%d, %s, %s, %s, %s, %s)",
	    prop, (form ? form : "NULL"), (dest ? dest : "NULL"),
	    (req_id ? req_id : "NULL"), (user ? user : "******"),
	    (pwheel ? pwheel : "NULL"));

	/* Reset per-printer counters before re-counting below. */
	for (i = 0; PStatus != NULL && PStatus[i] != NULL; i++)
		PStatus[i]->nrequests = 0;

	for (rp = Request_List; rp != NULL; rp = rp->next) {
		/* Count every not-yet-done request toward its printer. */
		if (rp->printer && !(rp->request->outcome & RS_DONE))
			rp->printer->nrequests++;

		if (*form && !SAME(form, rp->request->form))
			continue;
		/* "dest" may name either the request destination or printer */
		if (*dest && !STREQU(dest, rp->request->destination)) {
			if (!rp->printer)
				continue;
			if (!STREQU(dest, rp->printer->printer->name))
				continue;
		}
		if (*req_id && !STREQU(req_id, rp->secure->req_id))
			continue;
		if (*user && !bangequ(user, rp->secure->user))
			continue;
		if (*pwheel && !SAME(pwheel, rp->pwheel_name))
			continue;
		/*
		 * For Trusted Extensions, we need to check the sensitivity
		 * label of the connection and job before we return it to the
		 * client.
		 */
		if ((md->admin <= 0) && (is_system_labeled()) &&
		    (md->slabel != NULL) && (rp->secure->slabel != NULL) &&
		    (!STREQU(md->slabel, rp->secure->slabel)))
			continue;

		/*
		 * Report the previously found match with MOKMORE; the
		 * final match is held back and reported with MOK below.
		 */
		if (found) {
			GetRequestFiles(found->request, files, sizeof (files));
			mputm(md, R_INQUIRE_REQUEST_RANK, MOKMORE,
			    found->secure->req_id,
			    found->request->user,
			    /* bgolden 091996, bug 1257405 */
			    found->secure->slabel,
			    found->secure->size,
			    found->secure->date,
			    found->request->outcome,
			    found->printer->printer->name,
			    (found->form ? found->form->form->name : ""),
			    NB(found->pwheel_name),
			    found_rank,
			    files);
		}
		found = rp;
		/*
		 * NOTE(review): found->printer is dereferenced here without
		 * a NULL check, unlike the dest-filter above — presumably
		 * every matched request has a printer; confirm.
		 */
		found_rank = found->printer->nrequests;
	}

	if (found) {
		GetRequestFiles(found->request, files, sizeof (files));
		mputm(md, R_INQUIRE_REQUEST_RANK, MOK,
		    found->secure->req_id,
		    found->request->user,
		    /* bgolden 091996, bug 1257405 */
		    found->secure->slabel,
		    found->secure->size,
		    found->secure->date,
		    found->request->outcome,
		    found->printer->printer->name,
		    (found->form ? found->form->form->name : ""),
		    NB(found->pwheel_name),
		    found_rank,
		    files);
	} else
		mputm(md, R_INQUIRE_REQUEST_RANK, MNOINFO, "", "", "", 0L,
		    0L, 0, "", "", "", 0, "");
}
static char * _cancel(MESG *md, char *dest, char *user, char *req_id) { static RSTATUS *rp; static char *s_dest; static char *s_user; static char *s_req_id; static int current; RSTATUS *crp; char *creq_id; syslog(LOG_DEBUG, "_cancel(%s, %s, %s)", (dest ? dest : "NULL"), (user ? user : "******"), (req_id ? req_id : "NULL")); if (dest || user || req_id) { s_dest = dest; if (STREQU(user, "!")) s_user = strdup("all!all"); else s_user = user; s_req_id = req_id; rp = Request_List; current = 0; if (STREQU(s_req_id, CURRENT_REQ)) { current = 1; s_req_id = NULL; } } while (rp != NULL) { crp = rp; rp = rp->next; if (*s_dest && !STREQU(s_dest, crp->request->destination)) continue; if (current && !(crp->request->outcome & RS_PRINTING)) continue; if (s_req_id && *s_req_id && !STREQU(s_req_id, crp->secure->req_id)) continue; if (*s_user && !bangequ(s_user, crp->secure->user)) continue; if (!md->admin && md->uid != crp->secure->uid) { errno = MNOPERM; return (Strdup(crp->secure->req_id)); } /* * For Trusted Extensions, we need to check the * sensitivity label of the * connection and job before we try to cancel it. */ if ((md->admin == 0) && (is_system_labeled()) && (md->slabel != NULL) && (crp->secure->slabel != NULL) && (!STREQU(md->slabel, crp->secure->slabel))) continue; crp->reason = MOK; creq_id = Strdup(crp->secure->req_id); syslog(LOG_DEBUG, "cancel reqid (%s) uid: %d, secureuid: %d", creq_id, md->uid, crp->secure->uid); if (cancel(crp, (md->uid != crp->secure->uid))) errno = MOK; else errno = M2LATE; return (creq_id); } errno = MUNKNOWN; return (NULL); }
void dirinit(char *mntpnt, char *map, char *opts, int direct, char **stack, char ***stkptr) { struct autodir *dir; char *p; if (strcmp(map, "-null") == 0) { if (strcmp(mntpnt, "/-") == 0) nodirect_map = TRUE; goto enter; } p = mntpnt + (strlen(mntpnt) - 1); if (*p == '/') *p = '\0'; /* trim trailing / */ if (*mntpnt != '/') { pr_msg("dir %s must start with '/'", mntpnt); return; } if (p = check_hier(mntpnt)) { pr_msg("hierarchical mountpoint: %s and %s", p, mntpnt); return; } /* * If it's a direct map then call dirinit * for every map entry. */ if ((strcmp(mntpnt, "/-") == 0) && !(nodirect_map)) { (void) loaddirect_map(map, map, opts, stack, stkptr); return; } /* * Home directories are polyinstantiated on * labeled systems. */ if (is_system_labeled() && (strcmp(mntpnt, "/home") == 0) && (strcmp(map, "auto_home") == 0)) { (void) loadzone_maps(mntpnt, map, opts, stack, stkptr); return; } enter: dir = (struct autodir *)malloc(sizeof (*dir)); if (dir == NULL) goto alloc_failed; dir->dir_name = strdup(mntpnt); if (dir->dir_name == NULL) goto alloc_failed; dir->dir_map = strdup(map); if (dir->dir_map == NULL) goto alloc_failed; dir->dir_opts = strdup(opts); if (dir->dir_opts == NULL) goto alloc_failed; dir->dir_direct = direct; dir->dir_remount = 0; dir->dir_next = NULL; /* * Append to dir chain */ if (dir_head == NULL) dir_head = dir; else dir_tail->dir_next = dir; dir->dir_prev = dir_tail; dir_tail = dir; return; alloc_failed: if (dir != NULL) { if (dir->dir_opts) free(dir->dir_opts); if (dir->dir_map) free(dir->dir_map); if (dir->dir_name) free(dir->dir_name); free(dir); } pr_msg("dirinit: memory allocation failed"); }
/*
 * s_start_change_request: handle S_START_CHANGE_REQUEST — put the named
 * request into the RS_CHANGING state so the client can edit its request
 * file, stopping any in-progress filtering/printing first.  Replies with
 * R_START_CHANGE_REQUEST carrying a status (MOK, MUNKNOWN, M2LATE,
 * MNOPERM, MNOOPEN, MBUSY) and, on MOK, the request file name.
 */
void
s_start_change_request(char *m, MESG *md)
{
	char *req_id;
	char *req_file = "";
	short status;
	RSTATUS *rp;
	char *path;
	char tmpName[BUFSIZ];
	struct stat tmpBuf;

	(void) getmessage(m, S_START_CHANGE_REQUEST, &req_id);
	syslog(LOG_DEBUG, "s_start_change_request(%s)",
	    (req_id ? req_id : "NULL"));

	if (!(rp = request_by_id(req_id)))
		status = MUNKNOWN;
	/*
	 * Trusted Extensions: a label mismatch is reported as MUNKNOWN so
	 * the request's existence is not revealed across labels.
	 */
	else if ((md->admin == 0) && (is_system_labeled()) &&
	    (md->slabel != NULL) && (rp->secure->slabel != NULL) &&
	    (!STREQU(md->slabel, rp->secure->slabel)))
		status = MUNKNOWN;
	else if (rp->request->outcome & RS_DONE)
		status = M2LATE;
	else if (!md->admin && md->uid != rp->secure->uid)
		status = MNOPERM;
	else if (rp->request->outcome & RS_CHANGING)
		status = MNOOPEN;	/* someone else is already editing */
	else if (rp->request->outcome & RS_NOTIFYING)
		status = MBUSY;
	else {
		status = MOK;

		/* Stop active filtering; mark it for a re-filter later. */
		if (rp->request->outcome & RS_FILTERING &&
		    !(rp->request->outcome & RS_STOPPED)) {
			rp->request->outcome |= (RS_REFILTER|RS_STOPPED);
			terminate(rp->exec);
		}

		/* Stop active printing. */
		if (rp->request->outcome & RS_PRINTING &&
		    !(rp->request->outcome & RS_STOPPED)) {
			rp->request->outcome |= RS_STOPPED;
			terminate(rp->printer->exec);
		}

		rp->request->outcome |= RS_CHANGING;

		/*
		 * Change the ownership of the request file to be "md->uid".
		 * Either this is identical to "rp->secure->uid", or it is
		 * "Lp_Uid" or it is root.  The idea is that the
		 * person at the other end needs access, and that may not
		 * be who queued the request.
		 */
		path = makepath(Lp_Tmp, rp->req_file, (char *)0);
		(void) Chown(path, md->uid, rp->secure->gid);
		Free(path);

#ifdef LP_USE_PAPI_ATTR
		/*
		 * Check if the PAPI job attribute file exists, if it does
		 * change the ownership of the file to be "md->uid".
		 * Either this is identical to "rp->secure->uid", or it is
		 * "Lp_Uid" or it is root.  The idea is that the
		 * person at the other end needs access, and that may not
		 * be who queued the request.
		 */
		/*
		 * NOTE(review): strtok(strdup(...)) leaks the duplicate and
		 * would crash if strdup returned NULL; "path" is also not
		 * freed on the stat-failure branch below.
		 */
		snprintf(tmpName, sizeof (tmpName), "%s-%s",
		    strtok(strdup(rp->req_file), "-"), LP_PAPIATTRNAME);
		path = makepath(Lp_Tmp, tmpName, (char *)0);
		if (stat(path, &tmpBuf) == 0) {
			syslog(LOG_DEBUG,
			    "s_start_change_request: attribute file ='%s'",
			    path);
			/*
			 * IPP job attribute file exists for this job so
			 * change permissions and ownership of the file
			 */
			(void) Chown(path, md->uid, rp->secure->gid);
			Free(path);
		} else {
			syslog(LOG_DEBUG,
			    "s_start_change_request: no attribute file");
		}
#endif

		add_flt_act(md, FLT_CHANGE, rp);
		req_file = rp->req_file;
	}

	mputm(md, R_START_CHANGE_REQUEST, status, req_file);
}
/*
 * Returns 0 for success, errno value otherwise.
 *
 * If the "bind_to_req_port_only" parameter is set and the requested port
 * number is available, then set allocated_port to it. If not available,
 * return an error.
 *
 * If the "bind_to_req_port_only" parameter is not set and the requested port
 * number is available, then set allocated_port to it. If not available,
 * find the first anonymous port we can and set allocated_port to that. If no
 * anonymous ports are available, return an error.
 *
 * In either case, when succeeding, update the sctp_t to record the port number
 * and insert it in the bind hash table.
 */
int
sctp_bindi(sctp_t *sctp, in_port_t port, boolean_t bind_to_req_port_only,
    int user_specified, in_port_t *allocated_port)
{
	/* number of times we have run around the loop */
	int count = 0;
	/* maximum number of times to run around the loop */
	int loopmax;
	zoneid_t zoneid = sctp->sctp_zoneid;
	zone_t *zone = crgetzone(sctp->sctp_credp);

	/*
	 * Lookup for free addresses is done in a loop and "loopmax"
	 * influences how long we spin in the loop
	 */
	if (bind_to_req_port_only) {
		/*
		 * If the requested port is busy, don't bother to look
		 * for a new one. Setting loop maximum count to 1 has
		 * that effect.
		 */
		loopmax = 1;
	} else {
		/*
		 * If the requested port is busy, look for a free one
		 * in the anonymous port range.
		 * Set loopmax appropriately so that one does not look
		 * forever in the case all of the anonymous ports are in use.
		 */
		loopmax = (sctp_largest_anon_port -
		    sctp_smallest_anon_port + 1);
	}
	do {
		uint16_t lport;		/* candidate port, network order */
		sctp_tf_t *tbf;		/* bind-hash bucket for the port */
		sctp_t *lsctp;
		int addrcmp;

		lport = htons(port);

		/*
		 * Ensure that the sctp_t is not currently in the bind hash.
		 * Hold the lock on the hash bucket to ensure that
		 * the duplicate check plus the insertion is an atomic
		 * operation.
		 *
		 * This function does an inline lookup on the bind hash list
		 * Make sure that we access only members of sctp_t
		 * and that we don't look at sctp_sctp, since we are not
		 * doing a SCTPB_REFHOLD. For more details please see the notes
		 * in sctp_compress()
		 */
		sctp_bind_hash_remove(sctp);
		tbf = &sctp_bind_fanout[SCTP_BIND_HASH(port)];
		mutex_enter(&tbf->tf_lock);
		for (lsctp = tbf->tf_sctp; lsctp != NULL;
		    lsctp = lsctp->sctp_bind_hash) {
			/* Skip entries on a different port or not yet bound */
			if (lport != lsctp->sctp_lport ||
			    lsctp->sctp_state < SCTPS_BOUND)
				continue;
			/*
			 * On a labeled system, we must treat bindings to ports
			 * on shared IP addresses by sockets with MAC exemption
			 * privilege as being in all zones, as there's
			 * otherwise no way to identify the right receiver.
			 */
			if (lsctp->sctp_zoneid != zoneid &&
			    !lsctp->sctp_mac_exempt &&
			    !sctp->sctp_mac_exempt)
				continue;
			addrcmp = sctp_compare_saddrs(sctp, lsctp);
			if (addrcmp != SCTP_ADDR_DISJOINT) {
				if (!sctp->sctp_reuseaddr) {
					/* in use */
					break;
				} else if (lsctp->sctp_state == SCTPS_BOUND ||
				    lsctp->sctp_state == SCTPS_LISTEN) {
					/*
					 * socket option SO_REUSEADDR is set
					 * on the binding sctp_t.
					 *
					 * We have found a match of IP source
					 * address and source port, which is
					 * refused regardless of the
					 * SO_REUSEADDR setting, so we break.
					 */
					break;
				}
			}
		}
		if (lsctp != NULL) {
			/* The port number is busy */
			mutex_exit(&tbf->tf_lock);
		} else {
			conn_t *connp = sctp->sctp_connp;

			if (is_system_labeled()) {
				mlp_type_t addrtype, mlptype;

				/*
				 * On a labeled system we must check the type
				 * of the binding requested by the user (either
				 * MLP or SLP on shared and private addresses),
				 * and that the user's requested binding
				 * is permitted.
				 */
				addrtype = tsol_mlp_addr_type(zone->zone_id,
				    sctp->sctp_ipversion,
				    sctp->sctp_ipversion == IPV4_VERSION ?
				    (void *)&sctp->sctp_ipha->ipha_src :
				    (void *)&sctp->sctp_ip6h->ip6_src);

				/*
				 * tsol_mlp_addr_type returns the possibilities
				 * for the selected address.  Since all local
				 * addresses are either private or shared, the
				 * return value mlptSingle means "local address
				 * not valid (interface not present)."
				 */
				if (addrtype == mlptSingle) {
					mutex_exit(&tbf->tf_lock);
					return (EADDRNOTAVAIL);
				}
				mlptype = tsol_mlp_port_type(zone, IPPROTO_SCTP,
				    port, addrtype);
				if (mlptype != mlptSingle) {
					if (secpolicy_net_bindmlp(connp->
					    conn_cred) != 0) {
						mutex_exit(&tbf->tf_lock);
						return (EACCES);
					}
					/*
					 * If we're binding a shared MLP, then
					 * make sure that this zone is the one
					 * that owns that MLP.  Shared MLPs can
					 * be owned by at most one zone.
					 */
					if (mlptype == mlptShared &&
					    addrtype == mlptShared &&
					    connp->conn_zoneid !=
					    tsol_mlp_findzone(IPPROTO_SCTP,
					    lport)) {
						mutex_exit(&tbf->tf_lock);
						return (EACCES);
					}
					connp->conn_mlp_type = mlptype;
				}
			}
			/*
			 * This port is ours. Insert in fanout and mark as
			 * bound to prevent others from getting the port
			 * number.
			 */
			sctp->sctp_state = SCTPS_BOUND;
			sctp->sctp_lport = lport;
			sctp->sctp_sctph->sh_sport = lport;

			ASSERT(&sctp_bind_fanout[SCTP_BIND_HASH(port)] == tbf);
			sctp_bind_hash_insert(tbf, sctp, 1);

			mutex_exit(&tbf->tf_lock);

			/*
			 * We don't want sctp_next_port_to_try to "inherit"
			 * a port number supplied by the user in a bind.
			 *
			 * This is the only place where sctp_next_port_to_try
			 * is updated. After the update, it may or may not
			 * be in the valid range.
			 */
			if (user_specified == 0)
				sctp_next_port_to_try = port + 1;

			*allocated_port = port;
			return (0);
		}

		if ((count == 0) && (user_specified)) {
			/*
			 * We may have to return an anonymous port. So
			 * get one to start with.
			 */
			port = sctp_update_next_port(sctp_next_port_to_try,
			    zone);
			user_specified = 0;
		} else {
			port = sctp_update_next_port(port + 1, zone);
		}
		if (port == 0)
			break;	/* no valid anonymous port left */

		/*
		 * Don't let this loop run forever in the case where
		 * all of the anonymous ports are in use.
		 */
	} while (++count < loopmax);

	return (bind_to_req_port_only ? EADDRINUSE : EADDRNOTAVAIL);
}
/* * audit_audit: * Cut and audit record if it is selected. * Return 0, if successfully written. * Return 0, if not written, and not expected to write. * Return -1, if not written because of unexpected error. */ int audit_audit(door_data_t *door_dp) { int ad; if (can_audit() == 0) { return (0); } if (door_dp->audit_na) { if (!audit_na_selected(door_dp)) { return (0); } } else if (!audit_selected(door_dp)) { return (0); } if ((ad = au_open()) == -1) { return (-1); } (void) au_write(ad, au_to_subject_ex(door_dp->audit_auid, door_dp->audit_euid, door_dp->audit_egid, door_dp->audit_uid, door_dp->audit_gid, door_dp->audit_pid, door_dp->audit_asid, &door_dp->audit_tid)); if (is_system_labeled()) (void) au_write(ad, au_to_mylabel()); if (door_dp->audit_policy & AUDIT_GROUP) { int ng; int maxgrp = getgroups(0, NULL); gid_t *grplst = alloca(maxgrp * sizeof (gid_t)); if ((ng = getgroups(maxgrp, grplst))) { (void) au_write(ad, au_to_newgroups(ng, grplst)); } } if (strlen(door_dp->audit_text) != 0) { (void) au_write(ad, au_to_text(door_dp->audit_text)); } if (strlen(door_dp->audit_text1) != 0) { (void) au_write(ad, au_to_text(door_dp->audit_text1)); } if (door_dp->audit_path != NULL) { (void) au_write(ad, au_to_path(door_dp->audit_path)); } #ifdef _LP64 (void) au_write(ad, au_to_return64((door_dp->audit_sorf == 0) ? 0 : -1, (int64_t)door_dp->audit_sorf)); #else (void) au_write(ad, au_to_return32((door_dp->audit_sorf == 0) ? 0 : -1, (int32_t)door_dp->audit_sorf)); #endif if (au_close(ad, 1, door_dp->audit_event) < 0) { (void) au_close(ad, 0, 0); return (-1); } return (0); }
/*
 * Use SMF error codes only on return or exit.
 */
/*
 * smbd entry point: validate the runtime environment (uid, global zone,
 * no Trusted Extensions, single instance), raise the fd limit, install
 * signal handling, initialize the service (foreground or daemonized),
 * then loop dispatching signals until shutdown is requested.
 */
int
main(int argc, char *argv[])
{
	struct sigaction act;
	sigset_t set;
	uid_t uid;
	int pfd = -1;		/* daemonize status pipe to the parent */
	uint_t sigval;
	struct rlimit rl;
	int orig_limit;

	smbd.s_pname = basename(argv[0]);
	openlog(smbd.s_pname, LOG_PID | LOG_NOWAIT, LOG_DAEMON);

	if (smbd_setup_options(argc, argv) != 0)
		return (SMF_EXIT_ERR_FATAL);

	if ((uid = getuid()) != smbd.s_uid) {
		smbd_report("user %d: %s", uid, strerror(EPERM));
		return (SMF_EXIT_ERR_FATAL);
	}

	if (getzoneid() != GLOBAL_ZONEID) {
		smbd_report("non-global zones are not supported");
		return (SMF_EXIT_ERR_FATAL);
	}

	if (is_system_labeled()) {
		smbd_report("Trusted Extensions not supported");
		return (SMF_EXIT_ERR_FATAL);
	}

	if (smbd_already_running())
		return (SMF_EXIT_OK);

	/*
	 * Raise the file descriptor limit to accommodate simultaneous user
	 * authentications/file access.
	 */
	/*
	 * NOTE(review): orig_limit is int and the report uses %d while
	 * rlim_cur is rlim_t — possible format/width mismatch; confirm.
	 */
	if ((getrlimit(RLIMIT_NOFILE, &rl) == 0) &&
	    (rl.rlim_cur < rl.rlim_max)) {
		orig_limit = rl.rlim_cur;
		rl.rlim_cur = rl.rlim_max;
		if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
			smbd_report("Failed to raise file descriptor limit"
			    " from %d to %d", orig_limit, rl.rlim_cur);
	}

	/* Block everything except SIGABRT while handlers are installed. */
	(void) sigfillset(&set);
	(void) sigdelset(&set, SIGABRT);

	(void) sigfillset(&act.sa_mask);
	act.sa_handler = smbd_sig_handler;
	act.sa_flags = 0;

	(void) sigaction(SIGABRT, &act, NULL);
	(void) sigaction(SIGTERM, &act, NULL);
	(void) sigaction(SIGHUP, &act, NULL);
	(void) sigaction(SIGINT, &act, NULL);
	(void) sigaction(SIGPIPE, &act, NULL);
	(void) sigaction(SIGUSR1, &act, NULL);

	/* These signals are allowed to interrupt sigsuspend() below. */
	(void) sigdelset(&set, SIGTERM);
	(void) sigdelset(&set, SIGHUP);
	(void) sigdelset(&set, SIGINT);
	(void) sigdelset(&set, SIGPIPE);
	(void) sigdelset(&set, SIGUSR1);

	if (smbd.s_fg) {
		/* Foreground mode: keep job-control signals deliverable. */
		(void) sigdelset(&set, SIGTSTP);
		(void) sigdelset(&set, SIGTTIN);
		(void) sigdelset(&set, SIGTTOU);

		if (smbd_service_init() != 0) {
			smbd_report("service initialization failed");
			exit(SMF_EXIT_ERR_FATAL);
		}
	} else {
		/*
		 * "pfd" is a pipe descriptor -- any fatal errors
		 * during subsequent initialization of the child
		 * process should be written to this pipe and the
		 * parent will report this error as the exit status.
		 */
		pfd = smbd_daemonize_init();

		if (smbd_service_init() != 0) {
			smbd_report("daemon initialization failed");
			exit(SMF_EXIT_ERR_FATAL);
		}

		smbd_daemonize_fini(pfd, SMF_EXIT_OK);
	}

	(void) atexit(smb_kmod_stop);

	/* Main loop: sleep in sigsuspend() and dispatch received signals. */
	while (!smbd.s_shutting_down) {
		if (smbd.s_sigval == 0 && smbd.s_refreshes == 0)
			(void) sigsuspend(&set);

		sigval = atomic_swap_uint(&smbd.s_sigval, 0);

		switch (sigval) {
		case 0:
		case SIGPIPE:
		case SIGABRT:
			break;

		case SIGHUP:
			syslog(LOG_DEBUG, "refresh requested");
			(void) pthread_cond_signal(&refresh_cond);
			break;

		case SIGUSR1:
			smb_log_dumpall();
			break;

		default:
			/*
			 * Typically SIGINT or SIGTERM.
			 */
			smbd.s_shutting_down = B_TRUE;
			break;
		}
	}

	smbd_service_fini();
	closelog();
	return ((smbd.s_fatal_error) ? SMF_EXIT_ERR_FATAL : SMF_EXIT_OK);
}
static void generate_record( char *locuser, /* username of local user */ int err, /* error status */ /* (=0 success, >0 error code) */ char *msg) /* error message */ { int rd; /* audit record descriptor */ char buf[256]; /* temporary buffer */ uid_t uid; gid_t gid; uid_t ruid; /* real uid */ gid_t rgid; /* real gid */ pid_t pid; struct passwd *pwd; uid_t ceuid; /* current effective uid */ struct auditinfo_addr info; if (cannot_audit(0)) { return; } pwd = getpwnam(locuser); if (pwd == NULL) { uid = (uid_t)-1; gid = (gid_t)-1; } else { uid = pwd->pw_uid; gid = pwd->pw_gid; } ceuid = geteuid(); /* save current euid */ (void) seteuid(0); /* change to root so you can audit */ /* determine if we're preselected */ if (!selected(uid, locuser, AUE_ftpd, err)) { (void) seteuid(ceuid); return; } ruid = getuid(); /* get real uid */ rgid = getgid(); /* get real gid */ pid = getpid(); /* see if terminal id already set */ if (getaudit_addr(&info, sizeof (info)) < 0) { perror("getaudit"); } rd = au_open(); /* add subject token */ (void) au_write(rd, au_to_subject_ex(uid, uid, gid, ruid, rgid, pid, pid, &info.ai_termid)); if (is_system_labeled()) (void) au_write(rd, au_to_mylabel()); /* add return token */ errno = 0; if (err) { /* add reason for failure */ if (err == UNKNOWN_USER) (void) snprintf(buf, sizeof (buf), "%s %s", msg, locuser); else (void) snprintf(buf, sizeof (buf), "%s", msg); (void) au_write(rd, au_to_text(buf)); #ifdef _LP64 (void) au_write(rd, au_to_return64(-1, (int64_t)err)); #else (void) au_write(rd, au_to_return32(-1, (int32_t)err)); #endif } else { #ifdef _LP64 (void) au_write(rd, au_to_return64(0, (int64_t)0)); #else (void) au_write(rd, au_to_return32(0, (int32_t)0)); #endif } /* write audit record */ if (au_close(rd, 1, AUE_ftpd) < 0) { (void) au_close(rd, 0, 0); } (void) seteuid(ceuid); }
int main(int argc, char **argv) { int ret; int i; (void) setlocale(LC_ALL, ""); (void) textdomain(TEXT_DOMAIN); (void) malloc(0); /* satisfy libumem dependency */ progname = basename(argv[0]); if (getzoneid() != GLOBAL_ZONEID) { (void) fprintf(stderr, gettext("cannot execute in non-global zone\n")); return (0); } if (is_system_labeled()) { (void) fprintf(stderr, gettext("Trusted Extensions not supported\n")); return (0); } if (argc < 2) { (void) fprintf(stderr, gettext("missing command\n")); smbadm_usage(B_FALSE); } /* * Special case "cmd --help/-?" */ if (strcmp(argv[1], "-?") == 0 || strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0) smbadm_usage(B_TRUE); for (i = 0; i < SMBADM_NCMD; ++i) { curcmd = &smbadm_cmdtable[i]; if (strcasecmp(argv[1], curcmd->name) == 0) { if (argc > 2) { /* cmd subcmd --help/-? */ if (strcmp(argv[2], "-?") == 0 || strcmp(argv[2], "--help") == 0 || strcmp(argv[2], "-h") == 0) smbadm_usage(B_TRUE); } if (!smbadm_checkauth(curcmd->auth)) { (void) fprintf(stderr, gettext("%s: %s: authorization denied\n"), progname, curcmd->name); return (1); } if ((ret = smbadm_init()) != 0) return (ret); ret = curcmd->func(argc - 1, &argv[1]); smbadm_fini(); return (ret); } } curcmd = NULL; (void) fprintf(stderr, gettext("unknown subcommand (%s)\n"), argv[1]); smbadm_usage(B_FALSE); return (2); }
/*
 * s_end_change_request: handle S_END_CHANGE_REQUEST — the counterpart of
 * s_start_change_request.  Re-reads the (possibly edited) request file,
 * merges the client's changes with the preserved internal state (outcome
 * bits, the private ACT_NOTIFY flag), applies hold/resume/immediate
 * actions with permission checks, revalidates or moves the request, and
 * persists it.  On any failure after the request file was re-read, the
 * saved copy of the old request is restored.  Replies with
 * R_END_CHANGE_REQUEST carrying the resulting status.
 */
void
s_end_change_request(char *m, MESG *md)
{
	char *req_id;
	RSTATUS *rp;
	off_t size;
	off_t osize;	/* original size, restored if the change fails */
	short err;
	short status;
	REQUEST *r = 0;
	REQUEST oldr;	/* snapshot of the request before the change */
	int call_schedule = 0;
	int move_ok = 0;	/* destination unchanged? */
	char *path;
	char tmpName[BUFSIZ];
	struct stat tmpBuf;

	(void) getmessage(m, S_END_CHANGE_REQUEST, &req_id);
	syslog(LOG_DEBUG, "s_end_change_request(%s)",
	    (req_id ? req_id : "NULL"));

	if (!(rp = request_by_id(req_id)))
		status = MUNKNOWN;
	/* Trusted Extensions: hide cross-label requests as MUNKNOWN. */
	else if ((md->admin == 0) && (is_system_labeled()) &&
	    (md->slabel != NULL) && (rp->secure->slabel != NULL) &&
	    (!STREQU(md->slabel, rp->secure->slabel)))
		status = MUNKNOWN;
	else if (!(rp->request->outcome & RS_CHANGING))
		status = MNOSTART;
	else {
		/* Hand file ownership back to the spooler. */
		path = makepath(Lp_Tmp, rp->req_file, (char *)0);
		(void) chownmod(path, Lp_Uid, Lp_Gid, 0644);
		Free(path);

#ifdef LP_USE_PAPI_ATTR
		/*
		 * Check if the PAPI job attribute file exists,
		 * if it does change the permission and the ownership
		 * of the file to be "Lp_Uid".
		 */
		/*
		 * NOTE(review): strtok(strdup(...)) leaks the duplicate,
		 * and "path" is not freed on the stat-failure branch.
		 */
		snprintf(tmpName, sizeof (tmpName), "%s-%s",
		    strtok(strdup(rp->req_file), "-"), LP_PAPIATTRNAME);
		path = makepath(Lp_Tmp, tmpName, (char *)0);
		if (stat(path, &tmpBuf) == 0) {
			syslog(LOG_DEBUG,
			    "s_end_change_request: attribute file ='%s'",
			    path);
			/*
			 * IPP job attribute file exists for this job so
			 * change permissions and ownership of the file
			 */
			(void) chownmod(path, Lp_Uid, Lp_Gid, 0644);
			Free(path);
		} else {
			syslog(LOG_DEBUG,
			    "s_end_change_request: no attribute file");
		}
#endif
		rp->request->outcome &= ~(RS_CHANGING);
		del_flt_act(md, FLT_CHANGE);
		/*
		 * The RS_CHANGING bit may have been the only thing
		 * preventing this request from filtering or printing,
		 * so regardless of what happens below,
		 * we must check to see if the request can proceed.
		 */
		call_schedule = 1;

		if (!(r = Getrequest(rp->req_file)))
			status = MNOOPEN;
		else {
			oldr = *(rp->request);
			*(rp->request) = *r;

			move_ok = STREQU(oldr.destination, r->destination);

			/*
			 * Preserve the current request status!
			 */
			rp->request->outcome = oldr.outcome;

			/*
			 * Here's an example of the dangers one meets
			 * when public flags are used for private
			 * purposes. ".actions" (indeed, anything in the
			 * REQUEST structure) is set by the person
			 * changing the job. However, lpsched uses
			 * ".actions" as place to indicate that a job
			 * came from a remote system and we must send
			 * back job completion--this is a strictly
			 * private flag that we must preserve.
			 */
			rp->request->actions |= (oldr.actions & ACT_NOTIFY);

			if ((rp->request->actions & ACT_SPECIAL) == ACT_HOLD) {
				rp->request->outcome |= RS_HELD;
				/*
				 * To be here means either the user owns
				 * the request or he or she is the
				 * administrator. Since we don't want to
				 * set the RS_ADMINHELD flag if the user
				 * is the administrator, the following
				 * compare will work.
				 */
				if (md->uid != rp->secure->uid)
					rp->request->outcome |= RS_ADMINHELD;
			}
			if ((rp->request->actions & ACT_SPECIAL) ==
			    ACT_RESUME) {
				/* Only an admin may lift an admin hold. */
				if ((rp->request->outcome & RS_ADMINHELD) &&
				    !md->admin) {
					status = MNOPERM;
					goto Return;
				}
				rp->request->outcome &=
				    ~(RS_ADMINHELD|RS_HELD);
			}
			if ((rp->request->actions & ACT_SPECIAL) ==
			    ACT_IMMEDIATE) {
				if (!md->admin) {
					status = MNOPERM;
					goto Return;
				}
				rp->request->outcome |= RS_IMMEDIATE;
			}

			size = chfiles(rp->request->file_list, Lp_Uid, Lp_Gid);
			if (size < 0) {
				status = MUNKNOWN;
				goto Return;
			}
			/* An empty, un-held request has nothing to print. */
			if (!(rp->request->outcome & RS_HELD) && size == 0) {
				status = MNOPERM;
				goto Return;
			}
			osize = rp->secure->size;
			rp->secure->size = size;

			if (move_ok == 0) {
				/* Destination changed: move the request. */
				char *dest = strdup(r->destination);

				if ((status = mv_file(rp, dest)) == MOK)
					rp->secure->size = osize;
				free(dest);
			} else if ((err = validate_request(rp, (char **)0,
			    move_ok)) != MOK) {
				status = err;
				rp->secure->size = osize;
			} else {
				status = MOK;
				/* Requeue if the scheduling order changed. */
				if ((rp->request->outcome & RS_IMMEDIATE) ||
				    (rp->request->priority != oldr.priority)) {
					remover(rp);
					insertr(rp);
				}
				freerequest(&oldr);
				(void) putrequest(rp->req_file, rp->request);
				/*
				 * fix for bugid 1103890.
				 * use Putsecure instead.
				 */
				(void) Putsecure(rp->req_file, rp->secure);
			}
		}
	}
Return:
	/* On failure, restore the pre-change request and re-persist it. */
	if (status != MOK && rp) {
		if (r) {
			freerequest(r);
			*(rp->request) = oldr;
		}
		if (status != MNOSTART)
			(void) putrequest(rp->req_file, rp->request);
	}

	if (call_schedule)
		maybe_schedule(rp);

	mputm(md, R_END_CHANGE_REQUEST, status, chkprinter_result);
}
/*
 * Check mount requests, add to mounted list if ok
 */
/*
 * Serves both MOUNT v1/v2 (fhstatus reply) and v3 (mountres3 reply).
 * Access checks: client name resolution, rejection mode / minimum
 * version, Trusted Extensions (NFSv2/v3 denied), share lookup, "nosub"
 * restriction, and security-flavor filtering.  All error paths set
 * "error" and jump to "reply", which encodes the status for the
 * request's protocol version.
 */
static void
mount(struct svc_req *rqstp)
{
	SVCXPRT *transp;
	int version, vers;
	struct fhstatus fhs;		/* v1/v2 reply */
	struct mountres3 mountres3;	/* v3 reply */
	char fh[FHSIZE3];
	int len = FHSIZE3;
	char *path, rpath[MAXPATHLEN];
	struct share *sh = NULL;
	struct nd_hostservlist *clnames = NULL;
	char *host = NULL;
	int error = 0, lofs_tried = 0;
	int flavor_list[MAX_FLAVORS];
	int flavor_count;
	struct netbuf *nb;

	transp = rqstp->rq_xprt;
	version = rqstp->rq_vers;
	path = NULL;

	if (!svc_getargs(transp, xdr_dirpath, (caddr_t)&path)) {
		svcerr_decode(transp);
		return;
	}

	getclientsnames(transp, &nb, &clnames);
	if (clnames == NULL || nb == NULL) {
		/*
		 * We failed to get a name for the client, even 'anon',
		 * probably because we ran out of memory. In this situation
		 * it doesn't make sense to allow the mount to succeed.
		 */
		error = EACCES;
		goto reply;
	}
	host = clnames->h_hostservs[0].h_host;

	/*
	 * If the version being used is less than the minimum version,
	 * the filehandle translation should not be provided to the
	 * client.
	 */
	if (rejecting || version < mount_vers_min) {
		if (verbose)
			syslog(LOG_NOTICE, "Rejected mount: %s for %s",
			    host, path);
		error = EACCES;
		goto reply;
	}

	/*
	 * Trusted Extension doesn't support older versions of nfs(v2, v3).
	 * To prevent circumventing TX label policy via using an older
	 * version of nfs client, reject the mount request and log an
	 * error.
	 */
	if (is_system_labeled()) {
		syslog(LOG_ERR,
		    "mount rejected: Solaris TX only supports nfs4 clients");
		error = EACCES;
		goto reply;
	}

	/*
	 * Get the real path (no symbolic links in it)
	 */
	if (realpath(path, rpath) == NULL) {
		error = errno;
		if (verbose)
			syslog(LOG_ERR, "mount request: realpath: %s: %m",
			    path);
		if (error == ENOENT)
			error = mount_enoent_error(path, rpath, clnames,
			    nb, flavor_list);
		goto reply;
	}

	if ((sh = findentry(rpath)) == NULL &&
	    (sh = find_lofsentry(rpath, &lofs_tried)) == NULL) {
		error = EACCES;
		goto reply;
	}

	/*
	 * Check if this is a "nosub" only export, in which case, mounting
	 * subdirectories isn't allowed. Bug 1184573.
	 */
	if (checkrootmount(sh, rpath) == 0) {
		error = EACCES;
		goto reply;
	}

	if (newopts(sh->sh_opts))
		flavor_count = getclientsflavors_new(sh, nb, clnames,
		    flavor_list);
	else
		flavor_count = getclientsflavors_old(sh, nb, clnames,
		    flavor_list);

	if (flavor_count == 0) {
		error = EACCES;
		goto reply;
	}

	/*
	 * Now get the filehandle.
	 *
	 * NFS V2 clients get a 32 byte filehandle.
	 * NFS V3 clients get a 32 or 64 byte filehandle, depending on
	 * the embedded FIDs.
	 */
	vers = (version == MOUNTVERS3) ? NFS_V3 : NFS_VERSION;

	/* LINTED pointer alignment */
	while (nfs_getfh(rpath, vers, &len, fh) < 0) {
		/* EINVAL may mean the path crossed into a lofs mount. */
		if (errno == EINVAL &&
		    (sh = find_lofsentry(rpath, &lofs_tried)) != NULL) {
			errno = 0;
			continue;
		}
		error = errno == EINVAL ? EACCES : errno;
		syslog(LOG_DEBUG, "mount request: getfh failed on %s: %m",
		    path);
		break;
	}

	if (version == MOUNTVERS3) {
		mountres3.mountres3_u.mountinfo.fhandle.fhandle3_len = len;
		mountres3.mountres3_u.mountinfo.fhandle.fhandle3_val = fh;
	} else {
		bcopy(fh, &fhs.fhstatus_u.fhs_fhandle, NFS_FHSIZE);
	}

reply:
	switch (version) {
	case MOUNTVERS:
	case MOUNTVERS_POSIX:
		/* v1/v2 carry NFS errors, not errnos, for these cases. */
		if (error == EINVAL)
			fhs.fhs_status = NFSERR_ACCES;
		else if (error == EREMOTE)
			fhs.fhs_status = NFSERR_REMOTE;
		else
			fhs.fhs_status = error;
		if (!svc_sendreply(transp, xdr_fhstatus, (char *)&fhs))
			log_cant_reply(transp);
		audit_mountd_mount(host, path, fhs.fhs_status); /* BSM */
		break;

	case MOUNTVERS3:
		if (!error) {
		mountres3.mountres3_u.mountinfo.auth_flavors.auth_flavors_val =
			flavor_list;
		mountres3.mountres3_u.mountinfo.auth_flavors.auth_flavors_len =
			flavor_count;
		} else if (error == ENAMETOOLONG)
			error = MNT3ERR_NAMETOOLONG;
		mountres3.fhs_status = error;
		if (!svc_sendreply(transp, xdr_mountres3, (char *)&mountres3))
			log_cant_reply(transp);
		audit_mountd_mount(host, path, mountres3.fhs_status); /* BSM */
		break;
	}

	if (verbose)
		syslog(LOG_NOTICE, "MOUNT: %s %s %s",
		    (host == NULL) ? "unknown host" : host,
		    error ? "denied" : "mounted", path);

	if (path != NULL)
		svc_freeargs(transp, xdr_dirpath, (caddr_t)&path);

	if (!error)
		mntlist_new(host, rpath); /* add entry to mount list */
/* NOTE(review): no "goto done" is visible here — label may be unused. */
done:
	if (sh)
		sharefree(sh);
	netdir_free(clnames, ND_HOSTSERVLIST);
}
/*
 * Transition an SCTP endpoint to the LISTEN state and insert it into the
 * stack's listener fanout hash so that inbound associations can find it.
 *
 * Returns 0 on success, EINVAL if the endpoint is already past the BOUND
 * state (or the conn is closing), or the errno from sctp_bind() if the
 * implicit anonymous bind fails.
 *
 * Runs with the endpoint exclusively held via RUN_SCTP/WAKE_SCTP.
 */
int
sctp_listen(sctp_t *sctp)
{
	sctp_tf_t	*tf;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	conn_t		*connp = sctp->sctp_connp;

	RUN_SCTP(sctp);
	/*
	 * TCP handles listen() increasing the backlog, need to check
	 * if it should be handled here too
	 */
	if (sctp->sctp_state > SCTPS_BOUND ||
	    (sctp->sctp_connp->conn_state_flags & CONN_CLOSING)) {
		WAKE_SCTP(sctp);
		return (EINVAL);
	}

	/* Do an anonymous bind for unbound socket doing listen(). */
	if (sctp->sctp_nsaddrs == 0) {
		struct sockaddr_storage ss;
		int ret;

		bzero(&ss, sizeof (ss));
		ss.ss_family = connp->conn_family;

		/*
		 * Drop the endpoint lock around sctp_bind(), which
		 * presumably takes RUN_SCTP itself -- TODO confirm.
		 */
		WAKE_SCTP(sctp);
		if ((ret = sctp_bind(sctp, (struct sockaddr *)&ss,
		    sizeof (ss))) != 0)
			return (ret);
		/*
		 * NOTE(review): no trailing semicolon -- RUN_SCTP
		 * presumably expands to a complete braced statement,
		 * matching its use elsewhere in this file.
		 */
		RUN_SCTP(sctp)
	}

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = connp->conn_cred;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;
	if (is_system_labeled())
		connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred);

	/*
	 * Enter LISTEN and (re)initialize the secret used for inbound
	 * association setup; the old secret is cleared so stale values
	 * cannot be accepted.
	 */
	sctp->sctp_state = SCTPS_LISTEN;
	(void) random_get_pseudo_bytes(sctp->sctp_secret, SCTP_SECRET_LEN);
	sctp->sctp_last_secret_update = ddi_get_lbolt64();
	bzero(sctp->sctp_old_secret, SCTP_SECRET_LEN);

	/*
	 * If there is an association limit, allocate and initialize
	 * the counter struct.  Note that since listen can be called
	 * multiple times, the struct may have been allready allocated.
	 */
	if (!list_is_empty(&sctps->sctps_listener_conf) &&
	    sctp->sctp_listen_cnt == NULL) {
		sctp_listen_cnt_t *slc;
		uint32_t ratio;

		ratio = sctp_find_listener_conf(sctps,
		    ntohs(connp->conn_lport));
		if (ratio != 0) {
			uint32_t mem_ratio, tot_buf;

			slc = kmem_alloc(sizeof (sctp_listen_cnt_t), KM_SLEEP);
			/*
			 * Calculate the connection limit based on
			 * the configured ratio and maxusers.  Maxusers
			 * are calculated based on memory size,
			 * ~ 1 user per MB.  Note that the conn_rcvbuf
			 * and conn_sndbuf may change after a
			 * connection is accepted.  So what we have
			 * is only an approximation.
			 */
			if ((tot_buf = connp->conn_rcvbuf +
			    connp->conn_sndbuf) < MB) {
				mem_ratio = MB / tot_buf;
				slc->slc_max = maxusers / ratio * mem_ratio;
			} else {
				mem_ratio = tot_buf / MB;
				slc->slc_max = maxusers / ratio / mem_ratio;
			}
			/* At least we should allow some associations! */
			if (slc->slc_max < sctp_min_assoc_listener)
				slc->slc_max = sctp_min_assoc_listener;
			slc->slc_cnt = 1;
			slc->slc_drop = 0;
			sctp->sctp_listen_cnt = slc;
		}
	}

	/* Hash on local port and link into the listener fanout. */
	tf = &sctps->sctps_listen_fanout[SCTP_LISTEN_HASH(
	    ntohs(connp->conn_lport))];
	sctp_listen_hash_insert(tf, sctp);

	WAKE_SCTP(sctp);
	return (0);
}
/*ARGSUSED*/
/*
 * lofs mount entry point: mount a loopback view of the filesystem named
 * by uap->spec onto the directory vp.
 *
 * Looks up the real (underlying) root vnode, enforces Trusted Extensions
 * MAC policy for cross-zone mounts in the labeled case, then allocates
 * and initializes the per-mount loinfo, propagating mount flags both
 * from the options given and from the underlying vfs.
 *
 * Returns 0 on success or an errno (EPERM, EBUSY, EACCES, or a lookup/
 * traverse error).
 */
static int
lo_mount(struct vfs *vfsp,
	struct vnode *vp,
	struct mounta *uap,
	struct cred *cr)
{
	int error;
	struct vnode *srootvp = NULL;	/* the server's root */
	struct vnode *realrootvp;
	struct loinfo *li;
	int nodev;

	nodev = vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL);

	/*
	 * NOTE(review): the specific secpolicy error is discarded and
	 * EPERM returned unconditionally -- presumably intentional;
	 * confirm before changing.
	 */
	if ((error = secpolicy_fs_mount(cr, vp, vfsp)) != 0)
		return (EPERM);

	/*
	 * Loopback devices which get "nodevices" added can be done without
	 * "nodevices" set because we cannot import devices into a zone
	 * with loopback.  Note that we have all zone privileges when
	 * this happens; if not, we'd have gotten "nosuid".
	 */
	if (!nodev && vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
		vfs_setmntopt(vfsp, MNTOPT_DEVICES, NULL, VFS_NODISPLAY);

	/*
	 * Unless this is an overlay mount, the mount point must not be
	 * in use (v_count == 1) and must not itself be a filesystem root.
	 */
	mutex_enter(&vp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (vp->v_count != 1 || (vp->v_flag & VROOT))) {
		mutex_exit(&vp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Find real root, and make vfs point to real vfs
	 */
	if (error = lookupname(uap->spec, (uap->flags & MS_SYSSPACE) ?
	    UIO_SYSSPACE : UIO_USERSPACE, FOLLOW, NULLVPP, &realrootvp))
		return (error);

	/*
	 * Enforce MAC policy if needed.
	 *
	 * Loopback mounts must not allow writing up.  The dominance test
	 * is intended to prevent a global zone caller from accidentally
	 * creating write-up conditions between two labeled zones.
	 * Local zones can't violate MAC on their own without help from
	 * the global zone because they can't name a pathname that
	 * they don't already have.
	 *
	 * The special case check for the NET_MAC_AWARE process flag is
	 * to support the case of the automounter in the global zone.  We
	 * permit automounting of local zone directories such as home
	 * directories, into the global zone as required by setlabel,
	 * zonecopy, and saving of desktop sessions.  Such mounts are
	 * trusted not to expose the contents of one zone's directories
	 * to another by leaking them through the global zone.
	 */
	if (is_system_labeled() && crgetzoneid(cr) == GLOBAL_ZONEID) {
		char	specname[MAXPATHLEN];
		zone_t	*from_zptr;
		zone_t	*to_zptr;

		if (vnodetopath(NULL, realrootvp, specname,
		    sizeof (specname), CRED()) != 0) {
			VN_RELE(realrootvp);
			return (EACCES);
		}

		/* Source zone and destination (mount point) zone. */
		from_zptr = zone_find_by_path(specname);
		to_zptr = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));

		/*
		 * Special case for zone devfs: the zone for /dev will
		 * incorrectly appear as the global zone since it's not
		 * under the zone rootpath.  So for zone devfs check allow
		 * read-write mounts.
		 *
		 * Second special case for scratch zones used for Live
		 * Upgrade: this is used to mount the zone's root from /root
		 * to /a in the scratch zone.  As with the other special
		 * case, this appears to be outside of the zone because
		 * it's not under the zone rootpath, which is $ZONEPATH/lu
		 * in the scratch zone case.
		 */
		if (from_zptr != to_zptr &&
		    !(to_zptr->zone_flags & ZF_IS_SCRATCH)) {
			/*
			 * We know at this point that the labels aren't equal
			 * because the zone pointers aren't equal, and zones
			 * can't share a label.
			 *
			 * If the source is the global zone then making
			 * it available to a local zone must be done in
			 * read-only mode as the label will become admin_low.
			 *
			 * If it is a mount between local zones then if
			 * the current process is in the global zone and has
			 * the NET_MAC_AWARE flag, then regular read-write
			 * access is allowed.  If it's in some other zone,
			 * but the label on the mount point dominates the
			 * original source, then allow the mount as
			 * read-only ("read-down").
			 */
			if (from_zptr->zone_id == GLOBAL_ZONEID) {
				/* make the mount read-only */
				vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
			} else { /* cross-zone mount */
				if (to_zptr->zone_id == GLOBAL_ZONEID &&
				    /* LINTED: no consequent */
				    getpflags(NET_MAC_AWARE, cr) != 0) {
					/* Allow the mount as read-write */
				} else if (bldominates(
				    label2bslabel(to_zptr->zone_slabel),
				    label2bslabel(from_zptr->zone_slabel))) {
					/* make the mount read-only */
					vfs_setmntopt(vfsp, MNTOPT_RO,
					    NULL, 0);
				} else {
					/*
					 * Would be a write-up: refuse, and
					 * drop every reference taken above.
					 */
					VN_RELE(realrootvp);
					zone_rele(to_zptr);
					zone_rele(from_zptr);
					return (EACCES);
				}
			}
		}
		zone_rele(to_zptr);
		zone_rele(from_zptr);
	}

	/*
	 * realrootvp may be an AUTOFS node, in which case we
	 * perform a VOP_ACCESS() to trigger the mount of the
	 * intended filesystem, so we loopback mount the intended
	 * filesystem instead of the AUTOFS filesystem.
	 */
	(void) VOP_ACCESS(realrootvp, 0, 0, cr, NULL);

	/*
	 * We're interested in the top most filesystem.
	 * This is specially important when uap->spec is a trigger
	 * AUTOFS node, since we're really interested in mounting the
	 * filesystem AUTOFS mounted as result of the VOP_ACCESS()
	 * call not the AUTOFS node itself.
	 */
	if (vn_mountedvfs(realrootvp) != NULL) {
		if (error = traverse(&realrootvp)) {
			VN_RELE(realrootvp);
			return (error);
		}
	}

	/*
	 * Allocate a vfs info struct and attach it
	 */
	li = kmem_zalloc(sizeof (struct loinfo), KM_SLEEP);
	li->li_realvfs = realrootvp->v_vfsp;
	li->li_mountvfs = vfsp;

	/*
	 * Set mount flags to be inherited by loopback vfs's
	 */
	if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
		li->li_mflag |= VFS_RDONLY;
	}
	if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
		li->li_mflag |= (VFS_NOSETUID|VFS_NODEVICES);
	}
	if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
		li->li_mflag |= VFS_NODEVICES;
	}
	if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
		li->li_mflag |= VFS_NOSETUID;
	}

	/*
	 * Permissive flags are added to the "deny" bitmap.
	 */
	if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
		li->li_dflag |= VFS_XATTR;
	}
	if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
		li->li_dflag |= VFS_NBMAND;
	}

	/*
	 * Propagate inheritable mount flags from the real vfs.
	 */
	if ((li->li_realvfs->vfs_flag & VFS_RDONLY) &&
	    !vfs_optionisset(vfsp, MNTOPT_RO, NULL))
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, VFS_NODISPLAY);
	if ((li->li_realvfs->vfs_flag & VFS_NOSETUID) &&
	    !vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
		vfs_setmntopt(vfsp, MNTOPT_NOSETUID, NULL, VFS_NODISPLAY);
	if ((li->li_realvfs->vfs_flag & VFS_NODEVICES) &&
	    !vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
		vfs_setmntopt(vfsp, MNTOPT_NODEVICES, NULL, VFS_NODISPLAY);

	/*
	 * Permissive flags such as VFS_XATTR, as opposed to restrictive
	 * flags such as VFS_RDONLY, are handled differently.  An explicit
	 * MNTOPT_NOXATTR should override the underlying filesystem's
	 * VFS_XATTR.
	 */
	if ((li->li_realvfs->vfs_flag & VFS_XATTR) &&
	    !vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL) &&
	    !vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
		vfs_setmntopt(vfsp, MNTOPT_XATTR, NULL, VFS_NODISPLAY);
	if ((li->li_realvfs->vfs_flag & VFS_NBMAND) &&
	    !vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL) &&
	    !vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
		vfs_setmntopt(vfsp, MNTOPT_NBMAND, NULL, VFS_NODISPLAY);

	/* Mirror the identity of the underlying vfs in the loopback vfs. */
	li->li_refct = 0;
	vfsp->vfs_data = (caddr_t)li;
	vfsp->vfs_bcount = 0;
	vfsp->vfs_fstype = lofsfstype;
	vfsp->vfs_bsize = li->li_realvfs->vfs_bsize;
	vfsp->vfs_dev = li->li_realvfs->vfs_dev;
	vfsp->vfs_fsid.val[0] = li->li_realvfs->vfs_fsid.val[0];
	vfsp->vfs_fsid.val[1] = li->li_realvfs->vfs_fsid.val[1];

	if (vfs_optionisset(vfsp, MNTOPT_LOFS_NOSUB, NULL)) {
		li->li_flag |= LO_NOSUB;
	}

	/*
	 * Propagate any VFS features
	 */
	vfs_propagate_features(li->li_realvfs, vfsp);

	/*
	 * Setup the hashtable.  If the root of this mount isn't a
	 * directory, there's no point in allocating a large hashtable.
	 * A table with one bucket is sufficient.
	 */
	if (realrootvp->v_type != VDIR)
		lsetup(li, 1);
	else
		lsetup(li, 0);

	/*
	 * Make the root vnode
	 */
	srootvp = makelonode(realrootvp, li, 0);
	srootvp->v_flag |= VROOT;
	li->li_rootvp = srootvp;

#ifdef LODEBUG
	lo_dprint(4, "lo_mount: vfs %p realvfs %p root %p realroot %p li %p\n",
	    vfsp, li->li_realvfs, srootvp, realrootvp, li);
#endif
	return (0);
}
/* * Use SMF error codes only on return or exit. */ int main(int argc, char *argv[]) { sigset_t set; uid_t uid; int pfd = -1; int sigval; struct rlimit rl; int orig_limit; #ifdef FKSMBD fksmbd_init(); #endif smbd.s_pname = basename(argv[0]); openlog(smbd.s_pname, LOG_PID | LOG_NOWAIT, LOG_DAEMON); if (smbd_setup_options(argc, argv) != 0) return (SMF_EXIT_ERR_FATAL); if ((uid = getuid()) != smbd.s_uid) { #ifdef FKSMBD /* Can't manipulate privileges in daemonize. */ if (smbd.s_fg == 0) { smbd.s_fg = 1; smbd_report("user %d (forced -f)", uid); } #else /* FKSMBD */ smbd_report("user %d: %s", uid, strerror(EPERM)); return (SMF_EXIT_ERR_FATAL); #endif /* FKSMBD */ } if (is_system_labeled()) { smbd_report("Trusted Extensions not supported"); return (SMF_EXIT_ERR_FATAL); } if (smbd_already_running()) return (SMF_EXIT_OK); /* * Raise the file descriptor limit to accommodate simultaneous user * authentications/file access. */ if ((getrlimit(RLIMIT_NOFILE, &rl) == 0) && (rl.rlim_cur < rl.rlim_max)) { orig_limit = rl.rlim_cur; rl.rlim_cur = rl.rlim_max; if (setrlimit(RLIMIT_NOFILE, &rl) != 0) smbd_report("Failed to raise file descriptor limit" " from %d to %d", orig_limit, rl.rlim_cur); } /* * Block async signals in all threads. */ (void) sigemptyset(&set); (void) sigaddset(&set, SIGHUP); (void) sigaddset(&set, SIGINT); (void) sigaddset(&set, SIGQUIT); (void) sigaddset(&set, SIGPIPE); (void) sigaddset(&set, SIGTERM); (void) sigaddset(&set, SIGUSR1); (void) sigaddset(&set, SIGUSR2); (void) sigprocmask(SIG_SETMASK, &set, NULL); if (smbd.s_fg) { if (smbd_service_init() != 0) { smbd_report("service initialization failed"); exit(SMF_EXIT_ERR_FATAL); } } else { /* * "pfd" is a pipe descriptor -- any fatal errors * during subsequent initialization of the child * process should be written to this pipe and the * parent will report this error as the exit status. 
*/ pfd = smbd_daemonize_init(); if (smbd_service_init() != 0) { smbd_report("daemon initialization failed"); exit(SMF_EXIT_ERR_FATAL); } smbd_daemonize_fini(pfd, SMF_EXIT_OK); } while (!smbd.s_shutting_down) { sigval = sigwait(&set); switch (sigval) { case -1: syslog(LOG_DEBUG, "sigwait failed: %s", strerror(errno)); break; case SIGPIPE: break; case SIGHUP: syslog(LOG_DEBUG, "refresh requested"); smbd_refresh_handler(); break; case SIGUSR1: syslog(LOG_DEBUG, "SIGUSR1 ignored"); break; default: /* * Typically SIGINT or SIGTERM. */ smbd.s_shutting_down = B_TRUE; break; } } /* * Allow termination signals while shutting down. */ (void) sigemptyset(&set); if (smbd.s_fg) { (void) sigaddset(&set, SIGHUP); (void) sigaddset(&set, SIGINT); } (void) sigaddset(&set, SIGTERM); (void) sigprocmask(SIG_UNBLOCK, &set, NULL); smbd_service_fini(); return ((smbd.s_fatal_error) ? SMF_EXIT_ERR_FATAL : SMF_EXIT_OK); }