/*
 * Protocol receive-done hook for UNIX-domain sockets: after the receiver
 * has consumed data from so's receive buffer, credit the space back to the
 * connected peer's send buffer and wake any writer blocked on it.
 *
 * Returns 0 on success or EINVAL if the socket has no attached unpcb.
 * 'flags' is accepted for the pru_rcvd interface but not examined here.
 */
static int
uipc_rcvd(struct socket *so, int flags)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;

	if (unp == 0)
		return EINVAL;

	switch (so->so_type) {

	case SOCK_DGRAM:
		/* Datagram sockets never generate a rcvd upcall. */
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/

	case SOCK_STREAM:
		/* Peer already disconnected: nothing to credit back. */
		if (unp->unp_conn == 0)
			break;
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Adjust backpressure on sender
		 * and wakeup any waiting to write.
		 *
		 * unp_mbcnt/unp_cc cache the receive buffer's mbuf count and
		 * byte count as of the last adjustment; the deltas below are
		 * what the receiver consumed since then.
		 */
		so2->so_snd.sb_mbmax += unp->unp_mbcnt - so->so_rcv.sb_mbcnt;
		unp->unp_mbcnt = so->so_rcv.sb_mbcnt;
		so2->so_snd.sb_hiwat += unp->unp_cc - so->so_rcv.sb_cc;
		/* Charge the byte-count delta against the peer owner's
		 * sbsize accounting (return value deliberately ignored). */
		(void)chgsbsize(so2->so_cred->cr_uid,
		    (rlim_t)unp->unp_cc - so->so_rcv.sb_cc);
		unp->unp_cc = so->so_rcv.sb_cc;
		sowwakeup(so2);
		break;

	default:
		panic("uipc_rcvd unknown socktype");
	}
	return 0;
}
/*
 * Reserve 'cc' bytes of space for a sockbuf.  sb_mbmax is scaled by
 * sb_efficiency so that mbuf-count accounting does not become the
 * limiting factor when buffering efficiency is near the normal case.
 *
 * Returns 1 on success, 0 if the request exceeds the global or
 * per-process limits.  The sockbuf must be locked by the caller.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/* Reject anything beyond the global clamp outright. */
	if (cc > sb_max_adj)
		return (0);

	/*
	 * A thread is normally supplied (curthread) so its RLIMIT_SBSIZE
	 * applies; the TCP input path passes NULL to indicate that no
	 * per-process resource limit is available, in which case the
	 * reservation is only bounded by the global limits.
	 */
	limit = (td != NULL) ? lim_cur(td, RLIMIT_SBSIZE) : RLIM_INFINITY;

	if (chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    limit) == 0)
		return (0);

	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
static void sodealloc(struct socket *so) { if (so->so_rcv.ssb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); if (so->so_snd.ssb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); #ifdef INET /* remove accept filter if present */ if (so->so_accf != NULL) do_setopt_accept_filter(so, NULL); #endif /* INET */ crfree(so->so_cred); kfree(so, M_SOCKET); }
/*
 * Release a sockbuf: free every mbuf it holds, then return the reserved
 * space to the owner's sbsize accounting and drop the mbuf limit to zero.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{
	/* Free the queued mbufs first, then tear down the reservation. */
	sbflush_internal(sb);
	sb->sb_mbmax = 0;
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
}
/*
 * Release a sockbuf: free its mbufs and give back the reserved space.
 * The socket is not asserted locked here: see sorflush().
 */
void
sbrelease(struct sockbuf *sb, struct socket *so)
{
	KASSERT(sb->sb_so == so);

	/* Drop the queued data, then the reservation itself. */
	sbflush(sb);
	sb->sb_mbmax = 0;
	(void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY);
}
/*
 * Free mbufs held by a socket, and reserved mbuf space.
 *
 * Unlike the upstream BSD version, this port performs no per-uid
 * socket-buffer accounting: the original code called
 * chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY)
 * here, which was disabled (HJo) in favor of clearing the watermark
 * directly.  'so' is therefore unused but kept for interface
 * compatibility with callers of the BSD signature.
 */
void
ofp_sbrelease_internal(struct sockbuf *sb, struct socket *so)
{
	(void)so;	/* unused: no per-uid accounting in this port */

	sbflush_internal(sb);
	sb->sb_hiwat = 0;
	sb->sb_mbmax = 0;
}
/*
 * Reserve 'cc' bytes of space for a sockbuf, scaling sb_mbmax so that
 * mbuf-count accounting does not become the limiting factor when
 * buffering efficiency is near the normal case.
 *
 * Returns 1 on success, 0 when the request is zero, exceeds the global
 * clamp, or would overrun the process's RLIMIT_SBSIZE.
 */
int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
{
	struct lwp *l = curlwp; /* XXX */
	rlim_t limit;
	struct uidinfo *uidinfo;

	KASSERT(so->so_lock == NULL || solocked(so));
	KASSERT(sb->sb_so == so);
	KASSERT(sb_max_adj != 0);

	if (cc == 0 || cc > sb_max_adj)
		return 0;

	/* Charge against the current process's sbsize limit. */
	limit = l->l_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur;
	uidinfo = so->so_uidinfo;
	if (chgsbsize(uidinfo, &sb->sb_hiwat, cc, limit) == 0)
		return 0;

	/* Factor of 2 leaves headroom for mbuf storage overhead. */
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_hiwat < sb->sb_lowat)
		sb->sb_lowat = sb->sb_hiwat;
	return 1;
}
/*
 * Protocol send hook for UNIX-domain sockets.  Delivers the mbuf chain
 * 'm' (with optional 'control' rights/ancillary data) directly into the
 * peer's receive buffer.  Datagram sockets may name a destination with
 * 'nam' for an implied connect/disconnect around the send; stream
 * sockets must be (or become) connected.  On success the mbufs are owned
 * by the peer; on any error path both 'm' and 'control' are freed here.
 *
 * Returns 0 on success or an errno (EINVAL, EOPNOTSUPP, EISCONN,
 * ENOTCONN, ENOBUFS, EPIPE, or an unp_connect/unp_internalize error).
 */
static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
	int error = 0;
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_long newhiwat;

	if (unp == 0) {
		error = EINVAL;
		goto release;
	}
	/* Out-of-band data is not supported on UNIX-domain sockets. */
	if (flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}
	/* Convert user-supplied control data (e.g. passed fds) to
	 * internal form before delivery. */
	if (control && (error = unp_internalize(control, p)))
		goto release;

	switch (so->so_type) {

	case SOCK_DGRAM: {
		struct sockaddr *from;

		if (nam) {
			/* sendto() on a connected datagram socket. */
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			/* Temporarily connect for this one datagram. */
			error = unp_connect(so, nam, p);
			if (error)
				break;
		} else {
			if (unp->unp_conn == 0) {
				error = ENOTCONN;
				break;
			}
		}
		so2 = unp->unp_conn->unp_socket;
		/* Tag the datagram with the sender's bound address, or
		 * an unnamed placeholder if it never bound. */
		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;
		if (sbappendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			/* Ownership passed to the peer's buffer. */
			m = 0;
			control = 0;
		} else
			error = ENOBUFS;
		/* Undo the implied connect from the sendto() case. */
		if (nam)
			unp_disconnect(unp);
		break;
	}

	case SOCK_STREAM:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (nam) {
				error = unp_connect(so, nam, p);
				if (error)
					break;	/* XXX */
			} else {
				error = ENOTCONN;
				break;
			}
		}

		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}
		if (unp->unp_conn == 0)
			panic("uipc_send connected but no connection?");
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		if (control) {
			if (sbappendcontrol(&so2->so_rcv, m, control))
				control = 0;
		} else
			sbappend(&so2->so_rcv, m);
		/*
		 * unp_mbcnt/unp_cc cache the peer receive buffer's mbuf and
		 * byte counts as of the last adjustment; shrink our send
		 * buffer limits by the amount just appended so that the
		 * sender sees backpressure.  The uipc_rcvd() hook credits
		 * this space back as the receiver drains it.
		 */
		so->so_snd.sb_mbmax -=
		    so2->so_rcv.sb_mbcnt - unp->unp_conn->unp_mbcnt;
		unp->unp_conn->unp_mbcnt = so2->so_rcv.sb_mbcnt;
		newhiwat = so->so_snd.sb_hiwat -
		    (so2->so_rcv.sb_cc - unp->unp_conn->unp_cc);
		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
		    newhiwat, RLIM_INFINITY);
		unp->unp_conn->unp_cc = so2->so_rcv.sb_cc;
		sorwakeup(so2);
		m = 0;
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by
	 * a SHUTDOWN.
	 */
	if (flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	/* Discard internalized rights that were never delivered. */
	if (control && error != 0)
		unp_dispose(control);

release:
	/* Anything still owned here was not handed to the peer: free it. */
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return error;
}