static void ncp_watchdog(struct ncp_conn *conn) { char *buf; struct mbuf *m; int error, len, flags; struct socket *so; struct sockaddr *sa; struct uio auio; sa = NULL; while (conn->wdg_so) { /* not a loop */ so = conn->wdg_so; auio.uio_resid = len = 1000000; auio.uio_td = curthread; flags = MSG_DONTWAIT; error = soreceive(so, (struct sockaddr**)&sa, &auio, &m, (struct mbuf**)0, &flags); if (error) break; len -= auio.uio_resid; NCPSDEBUG("got watch dog %d\n",len); if (len != 2) break; buf = mtod(m, char*); if (buf[1] != '?') break; buf[1] = 'Y'; error = sosend(so, (struct sockaddr*)sa, 0, m, 0, 0, curthread); NCPSDEBUG("send watch dog %d\n",error); break; } if (sa) free(sa, M_SONAME); return; }
/*
 * Receive a datagram on `so`, optionally capturing the sender's address.
 *
 * On entry *len is the buffer capacity; on return it holds the number of
 * bytes received.  `from`/`fromlen` may be NULL if the caller does not
 * want the peer address.  Returns an errno_t (0 on success).
 *
 * Two build variants: the KPI socket path uses sock_receive() with a
 * msghdr; the legacy path drives soreceive() under the network funnel.
 */
errno_t
xi_sock_receivefrom(xi_socket_t so, void *buf, size_t *len, int flags,
    struct sockaddr *from, int *fromlen)
{
	struct iovec aio;
	errno_t error;
#ifdef __KPI_SOCKET__
	struct msghdr msg;
	size_t recvLen;

	aio.iov_base = buf;
	aio.iov_len = *len;
	bzero(&msg, sizeof(msg));
	msg.msg_iov = (struct iovec *) &aio;
	msg.msg_iovlen = 1;
	/* Only ask for the peer address if the caller supplied room for it. */
	if(from != NULL && fromlen != NULL && *fromlen > 0) {
		msg.msg_name = from;
		msg.msg_namelen = *fromlen;
	}
	error = sock_receive(so, &msg, flags, &recvLen);
	*len = recvLen;
	return error;
#else
	struct uio auio;
	struct sockaddr *fromsa = 0;

	aio.iov_base = (char *)buf;
	aio.iov_len = *len;
	auio.uio_iov = &aio;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;	/* XXX */
	auio.uio_resid = *len;

	thread_funnel_set(network_flock, TRUE);
	error = soreceive(so, &fromsa, &auio, NULL, 0, &flags);
	(void)thread_funnel_set(network_flock, FALSE);
	if (from != NULL && fromsa) {
		/* NOTE(review): copies a fixed sizeof(struct sockaddr_lpx)
		 * regardless of *fromlen — the caller's buffer must be at
		 * least that large; verify with all callers.  Also note
		 * *fromlen is not updated on this path, unlike the KPI one. */
		bcopy(fromsa, from, sizeof(struct sockaddr_lpx));
		FREE(fromsa, M_SONAME);
	}
	/* NOTE(review): if from == NULL but soreceive allocated fromsa,
	 * fromsa is never freed here — possible M_SONAME leak; confirm. */
	*len = *len - auio.uio_resid;
	return error;
#endif
}
/*
 * Read exactly `len` bytes from the connection's socket into `buf`.
 *
 * Uses a non-blocking soreceive(); returns 0 on success, -1 on a socket
 * error or a short read.
 */
static int
icl_conn_receive_buf(struct icl_conn *ic, void *buf, size_t len)
{
	struct iovec aiov;
	struct uio auio;
	int error, recv_flags;

	memset(&auio, 0, sizeof(auio));
	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;

	recv_flags = MSG_DONTWAIT;
	error = soreceive(ic->ic_socket, NULL, &auio, NULL, NULL, &recv_flags);
	if (error != 0) {
		ICL_DEBUG("soreceive error %d", error);
		return (-1);
	}
	if (auio.uio_resid != 0) {
		/* Fewer than `len` bytes were available. */
		ICL_DEBUG("short read");
		return (-1);
	}
	return (0);
}
/*
 * Vnode read op for a FIFO: forward the read to the fifo's read-side
 * socket.  The vnode lock is dropped around soreceive() so a blocking
 * read cannot hold the vnode locked.
 */
/* ARGSUSED */
int
fifo_read(void *v)
{
	struct vop_read_args *ap = v;
	struct uio *uio = ap->a_uio;
	struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
	struct proc *p = uio->uio_procp;
	int error;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("fifo_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	/* Map the vnode-layer non-blocking flag onto the socket. */
	if (ap->a_ioflag & IO_NDELAY)
		rso->so_state |= SS_NBIO;
	VOP_UNLOCK(ap->a_vp, 0, p);
	error = soreceive(rso, NULL, uio, NULL, NULL, NULL, 0);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (ap->a_ioflag & IO_NDELAY) {
		rso->so_state &= ~SS_NBIO;
		/* Non-blocking read on a FIFO with no writers reads as EOF,
		 * not EWOULDBLOCK. */
		if (error == EWOULDBLOCK &&
		    ap->a_vp->v_fifoinfo->fi_writers == 0)
			error = 0;
	}
	return (error);
}
/*
 * Receive exactly `len` bytes from the connection's socket as an mbuf
 * chain (no copy into a flat buffer).  Returns the chain on success, or
 * NULL on a socket error or short read (in which case any partial chain
 * is freed).
 */
static struct mbuf *
icl_conn_receive(struct icl_conn *ic, size_t len)
{
	struct uio auio;
	struct mbuf *m;
	int error, recv_flags;

	memset(&auio, 0, sizeof(auio));
	auio.uio_resid = len;
	recv_flags = MSG_DONTWAIT;

	error = soreceive(ic->ic_socket, NULL, &auio, &m, NULL, &recv_flags);
	if (error != 0) {
		ICL_DEBUG("soreceive error %d", error);
		return (NULL);
	}
	if (auio.uio_resid != 0) {
		/* Partial data: discard and report a short read. */
		m_freem(m);
		ICL_DEBUG("short read");
		return (NULL);
	}
	return (m);
}
static int soreceive_fix(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { if ((controlp) && (*controlp)) { m_freem(*controlp); } return (soreceive(so, psa, uio, mp0, controlp, flagsp)); }
/*
 * File-descriptor read op for sockets: hand the uio straight to
 * soreceive(), ignoring the file offset (sockets are not seekable).
 * This variant of soreceive() takes seven arguments (trailing
 * socklen_t) — an older/alternative kernel API.
 */
/* ARGSUSED */
int
soo_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (soreceive((struct socket *)fp->f_data, (struct mbuf **)0,
	    uio, (struct mbuf **)0, (struct mbuf **)0, (int *)0,
	    (socklen_t)0));
}
/*
 * Receive data in an 'mbuf chain'.
 * The chain must be released once the data has been extracted:
 *
 *	rtems_bsdnet_semaphore_obtain();
 *	m_freem(chain);
 *	rtems_bsdnet_semaphore_release();
 *
 * Returns the number of bytes received (the chain is stored in *ppm and
 * ownership passes to the caller), or -1 with errno set on failure.
 * If `fromaddr` is non-NULL, up to *fromlen bytes of the sender address
 * are copied out and *fromlen is updated to the amount copied.
 */
ssize_t
recv_mbuf_from(int s, struct mbuf **ppm, long len,
    struct sockaddr *fromaddr, int *fromlen)
{
	int ret = -1;
	int error;
	struct uio auio;
	struct socket *so;
	struct mbuf *from = NULL;

	memset(&auio, 0, sizeof(auio));
	*ppm = 0;
	rtems_bsdnet_semaphore_obtain ();
	if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	/*
	auio.uio_iov = mp->msg_iov;
	auio.uio_iovcnt = mp->msg_iovlen;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	*/
	auio.uio_resid = len;
	error = soreceive (so, &from, &auio, (struct mbuf **) ppm,
			(struct mbuf **)NULL,
			NULL);
	if (error) {
		/* A partial read interrupted by EINTR/EWOULDBLOCK still
		 * counts as success for the bytes already received. */
		if (auio.uio_resid != len &&
		    (error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	if (error) {
		errno = error;
	} else {
		ret = len - auio.uio_resid;
		if (fromaddr) {
			/* Clamp the address copy to what the mbuf holds. */
			len = *fromlen;
			if ((len <= 0) || (from == NULL)) {
				len = 0;
			} else {
				if (len > from->m_len)
					len = from->m_len;
				memcpy (fromaddr, mtod(from, caddr_t), len);
			}
			*fromlen = len;
		}
	}
	if (from)
		m_freem (from);
	/* On failure, don't hand a partial chain back to the caller. */
	if (error && *ppm) {
		m_freem(*ppm);
		*ppm = 0;
	}
	rtems_bsdnet_semaphore_release ();
	return (ret);
}
/*
 * Receive a packet on the Rx socket into the caller's iovecs.
 *
 * On return *alength is reduced to the number of bytes actually read,
 * and *addr (if non-NULL) holds the sender when it was an AF_INET peer.
 * The AFS global lock is dropped around the (possibly blocking)
 * soreceive() call.
 *
 * Fix: on the non-debug error path the original returned without freeing
 * `sa`; soreceive() may have allocated the sockaddr before failing, which
 * leaked M_SONAME memory.  Free it before bailing out.
 */
int
osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
	       int nvecs, int *alength)
{
    struct uio u;
    int i;
    struct iovec iov[RX_MAXIOVECS];
    struct sockaddr *sa = NULL;
    int code;
    int haveGlock = ISAFS_GLOCK();
    /*AFS_STATCNT(osi_NetReceive); */

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
#ifdef AFS_FBSD50_ENV
    u.uio_td = NULL;
#else
    u.uio_procp = NULL;
#endif

    if (haveGlock)
	AFS_GUNLOCK();
    code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
    if (haveGlock)
	AFS_GLOCK();

    if (code) {
#if KNET_DEBUG
	if (code == EINVAL)
	    Debugger("afs NetReceive busted");
	else
	    printf("y");
#else
	/* soreceive() may have allocated a sockaddr before failing;
	 * release it so the error path doesn't leak M_SONAME memory. */
	if (sa)
	    FREE(sa, M_SONAME);
	return code;
#endif
    }
    *alength -= u.uio_resid;
    if (sa) {
	if (sa->sa_family == AF_INET) {
	    if (addr)
		*addr = *(struct sockaddr_in *)sa;
	} else
	    printf("Unknown socket family %d in NetReceive\n", sa->sa_family);
	FREE(sa, M_SONAME);
    }
    return code;
}
/*
 * Receive a packet on the Rx socket into the caller's iovecs (OpenBSD
 * variant: the sender address comes back in an mbuf rather than a
 * malloc'd sockaddr).
 *
 * On return *alength is reduced to the number of bytes actually read,
 * and *addr (if non-NULL) receives the sender address copied out of the
 * name mbuf.  The AFS global lock is dropped around soreceive().
 */
int
osi_NetReceive(osi_socket asocket, struct sockaddr_in *addr, struct iovec *dvec,
	       int nvecs, int *alength)
{
    struct uio u;
    int i, code;
    struct iovec iov[RX_MAXIOVECS];
    struct mbuf *nam = NULL;
    int haveGlock = ISAFS_GLOCK();

    memset(&u, 0, sizeof(u));
    memset(&iov, 0, sizeof(iov));

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: too many iovecs\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
    u.uio_procp = NULL;

    if (haveGlock)
	AFS_GUNLOCK();
    /* Only request the sender's name when the caller wants it. */
    code = soreceive(asocket, (addr ? &nam : NULL), &u, NULL, NULL, NULL
#if defined(AFS_OBSD45_ENV)
		     , 0
#endif
	);
    if (haveGlock)
	AFS_GLOCK();

    if (code) {
#ifdef RXKNET_DEBUG
	printf("rx code %d termState %d\n", code, afs_termState);
#endif
	/* During shutdown, park here until the Rx event stage finishes. */
	while (afs_termState == AFSOP_STOP_RXEVENT)
	    afs_osi_Sleep(&afs_termState);
	return code;
    }
    *alength -= u.uio_resid;
    if (addr && nam) {
	/* NOTE(review): copies nam->m_len bytes into a sockaddr_in —
	 * assumes the name mbuf holds exactly a sockaddr_in; confirm. */
	memcpy(addr, mtod(nam, caddr_t), nam->m_len);
	m_freem(nam);
    }
    return code;
}
/*
 * Vnode op for read (Darwin FIFO): forward the read to the fifo's
 * read-side socket via soreceive().
 *
 * FIFO conformance: a reader with no writers must not block — that case
 * is detected up front and converted to an EOF (zero-byte) return rather
 * than letting soreceive() sleep.
 */
int
fifo_read(struct vnop_read_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
	user_ssize_t startresid;
	int error;
	int rflags;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("fifo_read mode");
#endif
	if (uio_resid(uio) == 0)
		return (0);

	rflags = (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0;

	startresid = uio_resid(uio);

	/* fifo conformance - if we have a reader open on the fifo but no
	 * writers then we need to make sure we do not block.  We do that by
	 * checking the receive buffer and if empty set error to EWOULDBLOCK.
	 * If error is set to EWOULDBLOCK we skip the call into soreceive */
	error = 0;
	if (ap->a_vp->v_fifoinfo->fi_writers < 1) {
		socket_lock(rso, 1);
		error = (rso->so_rcv.sb_cc == 0) ? EWOULDBLOCK : 0;
		socket_unlock(rso, 1);
	}

	/* skip soreceive to avoid blocking when we have no writers */
	if (error != EWOULDBLOCK) {
		error = soreceive(rso, (struct sockaddr **)0, uio,
		    (struct mbuf **)0, (struct mbuf **)0, &rflags);
		if (error == 0)
			lock_vnode_and_post(ap->a_vp, 0);
	} else {
		/* clear EWOULDBLOCK and return EOF (zero) */
		error = 0;
	}
	/*
	 * Clear EOF indication after first such return.
	 */
	if (uio_resid(uio) == startresid) {
		socket_lock(rso, 1);
		rso->so_state &= ~SS_CANTRCVMORE;
		socket_unlock(rso, 1);
	}
	return (error);
}
int t_recv (long s, char * buf, int len, int flag) { #ifdef SOCKDEBUG char logbuf[10]; #endif struct socket * so; int err; int sendlen = len; so = LONG2SO(s); #ifdef SOC_CHECK_ALWAYS SOC_CHECK(so); #endif if ((so->so_state & SO_IO_OK) != SS_ISCONNECTED) { so->so_error = EPIPE; #ifdef SOCKDEBUG sprintf(logbuf, "t_recv: %d", so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif return SOCKET_ERROR; } so->so_error = 0; LOCK_NET_RESOURCE(NET_RESID); IN_PROFILER(PF_TCP, PF_ENTRY); /* measure time in TCP */ INET_TRACE (INETM_IO, ("INET:recv: so %x, len %d\n", so, len)); err = soreceive(so, NULL, buf, &len, flag); IN_PROFILER(PF_TCP, PF_EXIT); /* measure time in TCP */ UNLOCK_NET_RESOURCE(NET_RESID); if(err) { so->so_error = err; #ifdef SOCKDEBUG sprintf(logbuf, "t_recv: %d", so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif return SOCKET_ERROR; } /* return bytes we sent - the amount we wanted to send minus * the amount left in the buffer. */ return (sendlen - len); }
/*
 * kttcp benchmark receive path: pull kio_totalsize bytes off the socket
 * referred to by kio_socket, discarding the data (UIO_NOCOPY), and time
 * how long it takes.  Results go back in kio_elapsed / kio_bytesdone.
 */
static int
kttcp_recv(struct thread *td, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len = 0;
	struct uio auio;
	struct iovec aiov;

	bzero(&aiov, sizeof(aiov));
	bzero(&auio, sizeof(auio));
	auio.uio_iov = &aiov;
	/* UIO_NOCOPY: consume socket data without copying it anywhere. */
	auio.uio_segflg = UIO_NOCOPY;
	error = fget(td, kio->kio_socket, &fp);
	if (error != 0)
		return error;
	/* NOTE(review): FWRITE looks odd for a receive path — presumably
	 * this should check FREAD; confirm against the send-side twin. */
	if ((fp->f_flag & FWRITE) == 0) {
		fdrop(fp, td);
		return EBADF;
	}
	if (fp->f_type == DTYPE_SOCKET) {
		len = kio->kio_totalsize;
		microtime(&t0);
		do {
			/* NOTE(review): `nbyte` is not declared in this
			 * function — it must be a file-scope variable, and
			 * MIN(len, nbyte) reads it before any visible
			 * assignment on the first pass; verify it is
			 * initialized elsewhere. */
			nbyte = MIN(len, (unsigned long long)nbyte);
			aiov.iov_len = nbyte;
			auio.uio_resid = nbyte;
			auio.uio_offset = 0;
			error = soreceive((struct socket *)fp->f_data,
			    NULL, &auio, NULL, NULL, NULL);
			/* uio_offset advanced by the bytes consumed;
			 * offset == 0 after the call means EOF. */
			len -= auio.uio_offset;
		} while (error == 0 && len > 0 && auio.uio_offset != 0);
		microtime(&t1);
		/* Peer closing early is not an error for a benchmark. */
		if (error == EPIPE)
			error = 0;
	} else
		error = EFTYPE;
	fdrop(fp, td);
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);
	kio->kio_bytesdone = kio->kio_totalsize - len;
	return 0;
}
static int soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct socket *so = fp->f_data; int error; #ifdef MAC error = mac_socket_check_receive(active_cred, so); if (error) return (error); #endif error = soreceive(so, 0, uio, 0, 0, 0); return (error); }
/* * Function: receive_packet * Purpose: * Return a received packet or an error if none available. */ static int receive_packet(struct socket * so, void * pp, int psize, int * actual_size) { uio_t auio; int error; int rcvflg; char uio_buf[ UIO_SIZEOF(1) ]; auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(pp), psize); rcvflg = MSG_WAITALL; error = soreceive(so, (struct sockaddr **) 0, auio, 0, 0, &rcvflg); *actual_size = psize - uio_resid(auio); return (error); }
/* ARGSUSED */ int soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct socket *so = fp->f_data; int error; #ifdef MAC error = mac_socket_check_receive(active_cred, so); if (error) return (error); #endif CURVNET_SET(so->so_vnet); error = soreceive(so, 0, uio, 0, 0, 0); CURVNET_RESTORE(); return (error); }
/*
 * PF_KEY socket receive hook: pass the read through to the raw-socket
 * receive routine (or soreceive() on FreeBSD), then wake readers again
 * if a queued message now fits in the receive buffer.
 *
 * NOTE(review): the matching #if and the function's return type are
 * above this chunk (K&R/ANSI prototype split) — not visible here.
 */
key_receive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
#endif
{
	struct rawcb *rp = sotorawcb(so);
	struct keycb *kp = (struct keycb *)rp;
	int error;

#ifndef __FreeBSD__
	error = (*kp->kp_receive)(so, paddr, uio, mp0, controlp, flagsp);
#else
	error = soreceive(so, paddr, uio, mp0, controlp, flagsp);
#endif
	/* If a deferred message now fits in the socket buffer, prod the
	 * reader so it gets delivered. */
	if (kp->kp_queue &&
	    sbspace(&rp->rcb_socket->so_rcv) > kp->kp_queue->m_pkthdr.len)
		sorwakeup(so);
	return error;
}
int t_recvfrom(long s, char * buf, int len, int flags, struct sockaddr * from, int * fromlen) { struct socket * so; struct mbuf * sender = NULL; int err; int sendlen = len; so = LONG2SO(s); SOC_CHECK(so); so->so_error = 0; LOCK_NET_RESOURCE(NET_RESID); err = soreceive(so, &sender, buf, &len, flags); /* copy sender info from mbuf to sockaddr */ if (sender) { MEMCPY(from, (mtod(sender, struct sockaddr *)), *fromlen ); m_freem (sender); } UNLOCK_NET_RESOURCE(NET_RESID); if(err) { so->so_error = err; return SOCKET_ERROR; } /* OK return: amount of data actually sent */ return (sendlen - len); }
int ncp_sock_recv(struct socket *so, struct mbuf **mp, int *rlen) { struct uio auio; struct thread *td = curthread; /* XXX */ int error,flags,len; auio.uio_resid = len = 1000000; auio.uio_td = td; flags = MSG_DONTWAIT; /* error = soreceive(so, 0, &auio, (struct mbuf **)0, (struct mbuf **)0, &flags);*/ error = soreceive(so, 0, &auio, mp, (struct mbuf **)0, &flags); *rlen = len - auio.uio_resid; /* if (!error) { *rlen=iov.iov_len; } else *rlen=0;*/ #ifdef NCP_SOCKET_DEBUG if (error) printf("ncp_recv: err=%d\n", error); #endif return (error); }
static bool_t svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg, struct sockaddr **addrp, struct mbuf **mp) { struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1; struct uio uio; struct mbuf *m; XDR xdrs; int error, rcvflag; /* * Serialise access to the socket and our own record parsing * state. */ sx_xlock(&xprt->xp_lock); for (;;) { /* * If we have an mbuf chain in cd->mpending, try to parse a * record from it, leaving the result in cd->mreq. If we don't * have a complete record, leave the partial result in * cd->mreq and try to read more from the socket. */ if (cd->mpending) { /* * If cd->resid is non-zero, we have part of the * record already, otherwise we are expecting a record * marker. */ if (!cd->resid) { /* * See if there is enough data buffered to * make up a record marker. Make sure we can * handle the case where the record marker is * split across more than one mbuf. */ size_t n = 0; uint32_t header; m = cd->mpending; while (n < sizeof(uint32_t) && m) { n += m->m_len; m = m->m_next; } if (n < sizeof(uint32_t)) goto readmore; if (cd->mpending->m_len < sizeof(uint32_t)) cd->mpending = m_pullup(cd->mpending, sizeof(uint32_t)); memcpy(&header, mtod(cd->mpending, uint32_t *), sizeof(header)); header = ntohl(header); cd->eor = (header & 0x80000000) != 0; cd->resid = header & 0x7fffffff; m_adj(cd->mpending, sizeof(uint32_t)); } /* * Start pulling off mbufs from cd->mpending * until we either have a complete record or * we run out of data. We use m_split to pull * data - it will pull as much as possible and * split the last mbuf if necessary. */ while (cd->mpending && cd->resid) { m = cd->mpending; if (cd->mpending->m_next || cd->mpending->m_len > cd->resid) cd->mpending = m_split(cd->mpending, cd->resid, M_WAIT); else cd->mpending = NULL; if (cd->mreq) m_last(cd->mreq)->m_next = m; else cd->mreq = m; while (m) { cd->resid -= m->m_len; m = m->m_next; } } /* * If cd->resid is zero now, we have managed to * receive a record fragment from the stream. 
Check * for the end-of-record mark to see if we need more. */ if (cd->resid == 0) { if (!cd->eor) continue; /* * Success - we have a complete record in * cd->mreq. */ xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE); cd->mreq = NULL; sx_xunlock(&xprt->xp_lock); if (! xdr_callmsg(&xdrs, msg)) { XDR_DESTROY(&xdrs); return (FALSE); } *addrp = NULL; *mp = xdrmbuf_getall(&xdrs); XDR_DESTROY(&xdrs); return (TRUE); } } readmore: /* * The socket upcall calls xprt_active() which will eventually * cause the server to call us here. We attempt to * read as much as possible from the socket and put * the result in cd->mpending. If the read fails, * we have drained both cd->mpending and the socket so * we can call xprt_inactive(). */ uio.uio_resid = 1000000000; uio.uio_td = curthread; m = NULL; rcvflag = MSG_DONTWAIT; CURVNET_SET(xprt->xp_socket->so_vnet); error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL, &rcvflag); CURVNET_RESTORE(); if (error == EWOULDBLOCK) { /* * We must re-test for readability after * taking the lock to protect us in the case * where a new packet arrives on the socket * after our call to soreceive fails with * EWOULDBLOCK. The pool lock protects us from * racing the upcall after our soreadable() * call returns false. */ mtx_lock(&xprt->xp_pool->sp_lock); if (!soreadable(xprt->xp_socket)) xprt_inactive_locked(xprt); mtx_unlock(&xprt->xp_pool->sp_lock); sx_xunlock(&xprt->xp_lock); return (FALSE); } if (error) { SOCKBUF_LOCK(&xprt->xp_socket->so_rcv); if (xprt->xp_upcallset) { xprt->xp_upcallset = 0; soupcall_clear(xprt->xp_socket, SO_RCV); } SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv); xprt_inactive(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (!m) { /* * EOF - the other end has closed the socket. */ xprt_inactive(xprt); cd->strm_stat = XPRT_DIED; sx_xunlock(&xprt->xp_lock); return (FALSE); } if (cd->mpending) m_last(cd->mpending)->m_next = m; else cd->mpending = m; }
/* * portal_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred, * struct file *a_fp) */ static int portal_open(struct vop_open_args *ap) { struct socket *so = 0; struct portalnode *pt; struct thread *td = curthread; struct vnode *vp = ap->a_vp; struct uio auio; struct iovec aiov[2]; struct sockbuf sio; int res; struct mbuf *cm = 0; struct cmsghdr *cmsg; int newfds; int *ip; int fd; int error; int len; struct portalmount *fmp; struct file *fp; struct portal_cred pcred; /* * Nothing to do when opening the root node. */ if (vp->v_flag & VROOT) return (vop_stdopen(ap)); /* * Can't be opened unless the caller is set up * to deal with the side effects. Check for this * by testing whether the p_dupfd has been set. */ KKASSERT(td->td_proc); if (td->td_lwp->lwp_dupfd >= 0) return (ENODEV); pt = VTOPORTAL(vp); fmp = VFSTOPORTAL(vp->v_mount); /* * Create a new socket. */ error = socreate(AF_UNIX, &so, SOCK_STREAM, 0, td); if (error) goto bad; /* * Reserve some buffer space */ res = pt->pt_size + sizeof(pcred) + 512; /* XXX */ error = soreserve(so, res, res, &td->td_proc->p_rlimit[RLIMIT_SBSIZE]); if (error) goto bad; /* * Kick off connection */ error = portal_connect(so, (struct socket *)fmp->pm_server->f_data); if (error) goto bad; /* * Wait for connection to complete */ /* * XXX: Since the mount point is holding a reference on the * underlying server socket, it is not easy to find out whether * the server process is still running. To handle this problem * we loop waiting for the new socket to be connected (something * which will only happen if the server is still running) or for * the reference count on the server socket to drop to 1, which * will happen if the server dies. Sleep for 5 second intervals * and keep polling the reference count. XXX. 
*/ crit_enter(); while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { if (fmp->pm_server->f_count == 1) { error = ECONNREFUSED; crit_exit(); goto bad; } (void) tsleep((caddr_t) &so->so_timeo, 0, "portalcon", 5 * hz); } crit_exit(); if (so->so_error) { error = so->so_error; goto bad; } /* * Set miscellaneous flags */ so->so_rcv.ssb_timeo = 0; so->so_snd.ssb_timeo = 0; atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR); atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR); pcred.pcr_flag = ap->a_mode; pcred.pcr_uid = ap->a_cred->cr_uid; pcred.pcr_ngroups = ap->a_cred->cr_ngroups; bcopy(ap->a_cred->cr_groups, pcred.pcr_groups, NGROUPS * sizeof(gid_t)); aiov[0].iov_base = (caddr_t) &pcred; aiov[0].iov_len = sizeof(pcred); aiov[1].iov_base = pt->pt_arg; aiov[1].iov_len = pt->pt_size; auio.uio_iov = aiov; auio.uio_iovcnt = 2; auio.uio_rw = UIO_WRITE; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_offset = 0; auio.uio_resid = aiov[0].iov_len + aiov[1].iov_len; error = sosend(so, NULL, &auio, NULL, NULL, 0, td); if (error) goto bad; len = sizeof(int); sbinit(&sio, len); do { struct mbuf *m; int flags; flags = MSG_WAITALL; error = soreceive(so, NULL, NULL, &sio, &cm, &flags); if (error) goto bad; /* * Grab an error code from the mbuf. */ if ((m = sio.sb_mb) != NULL) { m = m_pullup(m, sizeof(int)); /* Needed? */ if (m) { error = *(mtod(m, int *)); m_freem(m); } else { error = EINVAL; } } else {
/*
 * XXX Liang: timeout for write is not supported yet.
 *
 * Write exactly `nob` bytes from `buffer` to `sock`, retrying partial
 * writes; transient errors (ERESTART/EINTR/EWOULDBLOCK) after progress
 * are retried.  Returns 0 on success, -errno on failure.
 */
int
libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
{
	int	rc;
	CFS_DECL_NET_DATA;

	while (nob > 0) {
		struct iovec iov = {
			.iov_base = buffer,
			.iov_len  = nob
		};
		struct uio suio = {
			.uio_iov	= &iov,
			.uio_iovcnt	= 1,
			.uio_offset	= 0,
			.uio_resid	= nob,
			.uio_segflg	= UIO_SYSSPACE,
			.uio_rw		= UIO_WRITE,
			.uio_procp	= NULL
		};
		CFS_NET_IN;
		rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, 0);
		CFS_NET_EX;
		if (rc != 0) {
			/* retry if some data went out before the interrupt */
			if ( suio.uio_resid != nob && ( rc == ERESTART || rc == EINTR ||
			     rc == EWOULDBLOCK))
				rc = 0;
			if ( rc != 0 )
				return -rc;
			rc = nob - suio.uio_resid;
			buffer = ((char *)buffer) + rc;
			nob = suio.uio_resid;
			continue;
		}
		break;
	}
	return (0);
}

/*
 * XXX Liang: timeout for read is not supported yet.
 *
 * Read exactly `nob` bytes from `sock` into `buffer`; mirror image of
 * libcfs_sock_write() above.  Returns 0 on success, -errno on failure.
 */
int
libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
	int	rc;
	CFS_DECL_NET_DATA;

	while (nob > 0) {
		struct iovec iov = {
			.iov_base = buffer,
			.iov_len  = nob
		};
		struct uio ruio = {
			.uio_iov	= &iov,
			.uio_iovcnt	= 1,
			.uio_offset	= 0,
			.uio_resid	= nob,
			.uio_segflg	= UIO_SYSSPACE,
			.uio_rw		= UIO_READ,
			.uio_procp	= NULL
		};
		CFS_NET_IN;
		rc = soreceive(sock, (struct sockaddr **)0, &ruio,
			       (struct mbuf **)0, (struct mbuf **)0, (int *)0);
		CFS_NET_EX;
		if (rc != 0) {
			/* retry if some data arrived before the interrupt */
			if ( ruio.uio_resid != nob && ( rc == ERESTART || rc == EINTR ||
			     rc == EWOULDBLOCK))
				rc = 0;
			if (rc != 0)
				return -rc;
			rc = nob - ruio.uio_resid;
			buffer = ((char *)buffer) + rc;
			nob = ruio.uio_resid;
			continue;
		}
		break;
	}
	return (0);
}

/*
 * Set the socket's send/receive buffer sizes (0 = leave unchanged).
 * Send size is clamped to KSOCK_MAX_BUF.  Returns 0 or -errno.
 */
int
libcfs_sock_setbuf (struct socket *sock, int txbufsize, int rxbufsize)
{
	struct sockopt	sopt;
	int		rc = 0;
	int		option;
	CFS_DECL_NET_DATA;

	bzero(&sopt, sizeof sopt);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_val = &option;
	sopt.sopt_valsize = sizeof(option);

	if (txbufsize != 0) {
		option = txbufsize;
		if (option > KSOCK_MAX_BUF)
			option = KSOCK_MAX_BUF;
		sopt.sopt_name = SO_SNDBUF;
		CFS_NET_IN;
		rc = sosetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't set send buffer %d: %d\n",
				option, rc);
			return -rc;
		}
	}
	if (rxbufsize != 0) {
		option = rxbufsize;
		sopt.sopt_name = SO_RCVBUF;
		CFS_NET_IN;
		rc = sosetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't set receive buffer %d: %d\n",
				option, rc);
			return -rc;
		}
	}
	return 0;
}

/*
 * Fetch the socket's local (remote == 0) or peer (remote != 0) IPv4
 * address/port in host byte order.  `ip`/`port` may be NULL if the
 * caller does not need them.  Returns 0 or -errno.
 */
int
libcfs_sock_getaddr (struct socket *sock, int remote, __u32 *ip, int *port)
{
	struct sockaddr_in *sin;
	struct sockaddr	*sa = NULL;
	int		rc;
	CFS_DECL_NET_DATA;

	if (remote != 0) {
		CFS_NET_IN;
		rc = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
		CFS_NET_EX;
		if (rc != 0) {
			if (sa) FREE(sa, M_SONAME);
			CERROR ("Error %d getting sock peer IP\n", rc);
			return -rc;
		}
	} else {
		CFS_NET_IN;
		rc = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
		CFS_NET_EX;
		if (rc != 0) {
			if (sa) FREE(sa, M_SONAME);
			CERROR ("Error %d getting sock local IP\n", rc);
			return -rc;
		}
	}
	if (sa != NULL) {
		sin = (struct sockaddr_in *)sa;
		if (ip != NULL)
			*ip = ntohl (sin->sin_addr.s_addr);
		if (port != NULL)
			*port = ntohs (sin->sin_port);
		if (sa)
			FREE(sa, M_SONAME);
	}
	return 0;
}

/*
 * Read back the socket's send/receive buffer sizes (NULL = skip).
 * Returns 0 or -errno.
 */
int
libcfs_sock_getbuf (struct socket *sock, int *txbufsize, int *rxbufsize)
{
	struct sockopt	sopt;
	int		rc;
	CFS_DECL_NET_DATA;

	bzero(&sopt, sizeof sopt);
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = SOL_SOCKET;

	if (txbufsize != NULL) {
		sopt.sopt_val = txbufsize;
		sopt.sopt_valsize = sizeof(*txbufsize);
		sopt.sopt_name = SO_SNDBUF;
		CFS_NET_IN;
		rc = sogetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't get send buffer size: %d\n", rc);
			return -rc;
		}
	}
	if (rxbufsize != NULL) {
		sopt.sopt_val = rxbufsize;
		sopt.sopt_valsize = sizeof(*rxbufsize);
		sopt.sopt_name = SO_RCVBUF;
		CFS_NET_IN;
		rc = sogetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't get receive buffer size: %d\n", rc);
			return -rc;
		}
	}
	return 0;
}

/*
 * Create a socket bound to local_ip/local_port and connect it to
 * peer_ip/peer_port, waiting (at splnet) for the handshake to finish.
 * On success *sockp holds the connected socket; on failure it is shut
 * down and closed, and -errno is returned.
 */
int
libcfs_sock_connect (struct socket **sockp, int *fatal,
		     __u32 local_ip, int local_port,
		     __u32 peer_ip, int peer_port)
{
	struct sockaddr_in	srvaddr;
	struct socket		*so;
	int			s;
	int			rc;
	CFS_DECL_FUNNEL_DATA;

	rc = libcfs_sock_create(sockp, fatal, local_ip, local_port);
	if (rc != 0)
		return rc;
	so = *sockp;
	bzero(&srvaddr, sizeof(srvaddr));
	srvaddr.sin_len = sizeof(struct sockaddr_in);
	srvaddr.sin_family = AF_INET;
	srvaddr.sin_port = htons (peer_port);
	srvaddr.sin_addr.s_addr = htonl (peer_ip);

	CFS_NET_IN;
	rc = soconnect(so, (struct sockaddr *)&srvaddr);
	if (rc != 0) {
		CFS_NET_EX;
		if (rc != EADDRNOTAVAIL && rc != EADDRINUSE)
			CDEBUG(D_NETERROR,
			       "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
			       HIPQUAD(local_ip), local_port, HIPQUAD(peer_ip), peer_port);
		goto out;
	}
	/* Wait for the non-blocking connect to complete (or fail). */
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		CDEBUG(D_NET, "ksocknal sleep for waiting auto_connect.\n");
		(void) tsleep((caddr_t)&so->so_timeo, PSOCK, "ksocknal_conn", hz);
	}
	if ((rc = so->so_error) != 0) {
		so->so_error = 0;
		splx(s);
		CFS_NET_EX;
		CDEBUG(D_NETERROR,
		       "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
		       HIPQUAD(local_ip), local_port, HIPQUAD(peer_ip), peer_port);
		goto out;
	}
	LASSERT(so->so_state & SS_ISCONNECTED);
	splx(s);
	CFS_NET_EX;
	if (sockp)
		*sockp = so;
	return (0);
out:
	CFS_NET_IN;
	soshutdown(so, 2);
	soclose(so);
	CFS_NET_EX;
	return (-rc);
}

/*
 * Shut down the read side of `sock`.  NOTE(review): the socket is not
 * soclose()d here despite the name — presumably the caller (or a
 * reference drop elsewhere) closes it; confirm against callers.
 */
void
libcfs_sock_release (struct socket *sock)
{
	CFS_DECL_FUNNEL_DATA;
	CFS_NET_IN;
	soshutdown(sock, 0);
	CFS_NET_EX;
}
/*
 * Broadcast a BOOTP request and wait for a matching reply.
 *
 * `call` is the outgoing packet, `reply` receives the response, `procp`
 * is the requesting process context.  The request is re-sent with an
 * increasing per-attempt timeout (1..MAX_RESEND_DELAY seconds, with the
 * socket's 1-second receive timeout providing the polling tick); replies
 * are validated against the request (op/xid/hlen/chaddr) before being
 * accepted.  Returns 0 on success or an errno (ETIMEDOUT on exhaustion).
 */
int
bootpc_call(
     struct bootp_packet *call,
     struct bootp_packet *reply,	/* output */
     struct proc *procp)
{
	struct socket *so;
	struct sockaddr_in *sin;
	struct mbuf *m, *nam;
	struct uio auio;
	struct iovec aio;
	int error, rcvflg, timo, secs, len;

	/* Free at end if not null. */
	nam = NULL;

	/*
	 * Create socket and set its recieve timeout.
	 */
	if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0,procp)))
		goto out;

	m = m_get(M_WAIT, MT_SOOPTS);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	} else {
		struct timeval *tv;
		tv = mtod(m, struct timeval *);
		m->m_len = sizeof(*tv);
		tv->tv_sec = 1;		/* one-second ticks for the wait loop */
		tv->tv_usec = 0;
		if ((error = sosetopt(so, SOL_SOCKET, SO_RCVTIMEO, m)))
			goto out;
	}

	/*
	 * Enable broadcast.
	 */
	{
		int *on;
		m = m_get(M_WAIT, MT_SOOPTS);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		on = mtod(m, int *);
		m->m_len = sizeof(*on);
		*on = 1;
		if ((error = sosetopt(so, SOL_SOCKET, SO_BROADCAST, m)))
			goto out;
	}

	/*
	 * Bind the local endpoint to a bootp client port.
	 */
	m = m_getclr(M_WAIT, MT_SONAME);
	sin = mtod(m, struct sockaddr_in *);
	sin->sin_len = m->m_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = INADDR_ANY;
	sin->sin_port = htons(IPPORT_BOOTPC);
	error = sobind(so, m);
	m_freem(m);
	if (error) {
		printf("bind failed\n");
		goto out;
	}

	/*
	 * Setup socket address for the server.
	 */
	nam = m_get(M_WAIT, MT_SONAME);
	if (nam == NULL) {
		error = ENOBUFS;
		goto out;
	}
	sin = mtod(nam, struct sockaddr_in *);
	sin-> sin_len = sizeof(*sin);
	sin-> sin_family = AF_INET;
	sin->sin_addr.s_addr = INADDR_BROADCAST;
	sin->sin_port = htons(IPPORT_BOOTPS);
	nam->m_len = sizeof(*sin);

	/*
	 * Send it, repeatedly, until a reply is received,
	 * but delay each re-send by an increasing amount.
	 * If the delay hits the maximum, start complaining.
	 */
	for (timo=1; timo <= MAX_RESEND_DELAY; timo++) {
		/* Send BOOTP request (or re-send). */
		aio.iov_base = (caddr_t) call;
		aio.iov_len = sizeof(*call);
		auio.uio_iov = &aio;
		auio.uio_iovcnt = 1;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_offset = 0;
		auio.uio_resid = sizeof(*call);
		auio.uio_procp = procp;
		error = sosend(so, nam, &auio, NULL, NULL, 0);
		if (error) {
			printf("bootpc_call: sosend: %d\n", error);
			switch (error) {
			case ENOBUFS:		/* No buffer space available */
			case ENETUNREACH:	/* Network is unreachable */
			case ENETDOWN:		/* Network interface is not configured */
			case EHOSTDOWN:		/* Host is down */
			case EHOSTUNREACH:	/* Host is unreachable */
			case EMSGSIZE:		/* Message too long */
				/* This is a possibly transient error.
				   We can still receive replies from
				   previous attempts. */
				break;
			default:
				goto out;
			}
		}

		/*
		 * Wait for up to timo seconds for a reply.
		 * The socket receive timeout was set to 1 second.
		 */
		secs = timo;
		while (secs > 0) {
			aio.iov_base = (caddr_t) reply;
			aio.iov_len = sizeof(*reply);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(*reply);
			auio.uio_procp = procp;
			rcvflg = 0;
			error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
			if (error == EWOULDBLOCK) {
				/* one receive-timeout tick elapsed; bump
				 * the elapsed-seconds field in the request */
				secs--;
				call->secs=htons(ntohs(call->secs)+1);
				continue;
			}
			if (error)
				goto out;
			len = sizeof(*reply) - auio.uio_resid;

			/* Do we have the required number of bytes ? */
			if (len < BOOTP_MIN_LEN)
				continue;

			/* Is it the right reply? */
			if (reply->op != 2)
				continue;
			if (reply->xid != call->xid)
				continue;
			if (reply->hlen != call->hlen)
				continue;
			if (bcmp(reply->chaddr,call->chaddr,call->hlen))
				continue;
			goto gotreply;	/* break two levels */
		} /* while secs */
	} /* send/receive a number of times then return an error */
	{
		uint32_t addr = ntohl(sin->sin_addr.s_addr);
		printf("BOOTP timeout for server %"PRIu32".%"PRIu32".%"PRIu32".%"PRIu32"\n",
		       (addr >> 24) & 0xff, (addr >> 16) & 0xff,
		       (addr >> 8) & 0xff, addr & 0xff);
	}
	error = ETIMEDOUT;
	goto out;

 gotreply:
 out:
	if (nam)
		m_freem(nam);
	soclose(so);
	return error;
}
/*
 * Broadcast DHCP/BOOTP queries for every unresolved interface in gctx and
 * collect replies until all interfaces (with a root path) are resolved or
 * the retry budget runs out.
 *
 * The outer loop re-sends forever with an increasing per-round delay
 * (capped at MAX_RESEND_DELAY); the socket has a 1-second receive timeout
 * so the inner wait loop wakes regularly to update each pending request's
 * "secs" field and check the round deadline.
 *
 * Returns 0 on success, ETIMEDOUT when nothing usable arrived, or the
 * errno of a failed socket operation.
 */
static int
bootpc_call(struct bootpc_globalcontext *gctx, struct thread *td)
{
	struct socket *so;
	struct sockaddr_in *sin, dst;
	struct uio auio;
	struct sockopt sopt;
	struct iovec aio;
	int error, on, rcvflg, timo, len;
	time_t atimo;		/* absolute deadline for this receive round */
	time_t rtimo;		/* settle deadline after a root path arrives */
	struct timeval tv;
	struct bootpc_ifcontext *ifctx;
	int outstanding;	/* interfaces still awaiting a reply */
	int gotrootpath;
	int retry;
	const char *s;

	/*
	 * Create socket and set its receive timeout.
	 */
	error = socreate(AF_INET, &so, SOCK_DGRAM, 0, td->td_ucred, td);
	if (error != 0)
		goto out0;

	tv.tv_sec = 1;
	tv.tv_usec = 0;
	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_RCVTIMEO;
	sopt.sopt_val = &tv;
	sopt.sopt_valsize = sizeof tv;

	error = sosetopt(so, &sopt);
	if (error != 0)
		goto out;

	/*
	 * Enable broadcast.
	 */
	on = 1;
	sopt.sopt_name = SO_BROADCAST;
	sopt.sopt_val = &on;
	sopt.sopt_valsize = sizeof on;

	error = sosetopt(so, &sopt);
	if (error != 0)
		goto out;

	/*
	 * Disable routing.
	 */
	on = 1;
	sopt.sopt_name = SO_DONTROUTE;
	sopt.sopt_val = &on;
	sopt.sopt_valsize = sizeof on;

	error = sosetopt(so, &sopt);
	if (error != 0)
		goto out;

	/*
	 * Bind the local endpoint to a bootp client port.
	 */
	sin = &dst;
	clear_sinaddr(sin);
	sin->sin_port = htons(IPPORT_BOOTPC);
	error = sobind(so, (struct sockaddr *)sin, td);
	if (error != 0) {
		printf("bind failed\n");
		goto out;
	}

	/*
	 * Setup socket address for the server.
	 */
	sin = &dst;
	clear_sinaddr(sin);
	sin->sin_addr.s_addr = INADDR_BROADCAST;
	sin->sin_port = htons(IPPORT_BOOTPS);

	/*
	 * Send it, repeatedly, until a reply is received,
	 * but delay each re-send by an increasing amount.
	 * If the delay hits the maximum, start complaining.
	 */
	timo = 0;
	rtimo = 0;
	for (;;) {
		outstanding = 0;
		gotrootpath = 0;

		/* Did any resolved interface already deliver a root path? */
		for (ifctx = gctx->interfaces; ifctx != NULL;
		    ifctx = ifctx->next) {
			if (bootpc_ifctx_isresolved(ifctx) != 0 &&
			    bootpc_tag(&gctx->tmptag, &ifctx->reply,
			    ifctx->replylen, TAG_ROOT) != NULL)
				gotrootpath = 1;
		}

		/* (Re-)send a query on every interface that still needs one. */
		for (ifctx = gctx->interfaces; ifctx != NULL;
		    ifctx = ifctx->next) {
			ifctx->outstanding = 0;
			if (bootpc_ifctx_isresolved(ifctx) != 0 &&
			    gotrootpath != 0) {
				continue;
			}
			if (bootpc_ifctx_isfailed(ifctx) != 0)
				continue;

			outstanding++;
			ifctx->outstanding = 1;

			/* Proceed to next step in DHCP negotiation */
			if ((ifctx->state == IF_DHCP_OFFERED &&
			    ifctx->dhcpquerytype != DHCP_REQUEST) ||
			    (ifctx->state == IF_DHCP_UNRESOLVED &&
			    ifctx->dhcpquerytype != DHCP_DISCOVER) ||
			    (ifctx->state == IF_BOOTP_UNRESOLVED &&
			    ifctx->dhcpquerytype != DHCP_NOMSG)) {
				ifctx->sentmsg = 0;
				bootpc_compose_query(ifctx, gctx, td);
			}

			/* Send BOOTP request (or re-send). */
			if (ifctx->sentmsg == 0) {
				switch(ifctx->dhcpquerytype) {
				case DHCP_DISCOVER:
					s = "DHCP Discover";
					break;
				case DHCP_REQUEST:
					s = "DHCP Request";
					break;
				case DHCP_NOMSG:
				default:
					s = "BOOTP Query";
					break;
				}
				printf("Sending %s packet from "
				    "interface %s (%*D)\n",
				    s, ifctx->ireq.ifr_name,
				    ifctx->sdl->sdl_alen,
				    (unsigned char *) LLADDR(ifctx->sdl),
				    ":");
				ifctx->sentmsg = 1;
			}

			aio.iov_base = (caddr_t) &ifctx->call;
			aio.iov_len = sizeof(ifctx->call);

			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(ifctx->call);
			auio.uio_td = td;

			/*
			 * Set netmask to 0.0.0.0 so the broadcast goes out
			 * of this interface; it is restored to 255.0.0.0
			 * right after the send below.
			 */
			sin = (struct sockaddr_in *) &ifctx->ireq.ifr_addr;
			clear_sinaddr(sin);
			error = ifioctl(ifctx->so, SIOCSIFNETMASK,
			    (caddr_t) &ifctx->ireq, td);
			if (error != 0)
				panic("bootpc_call:"
				    "set if netmask, error=%d", error);

			error = sosend(so, (struct sockaddr *) &dst,
			    &auio, NULL, NULL, 0, td);
			if (error != 0) {
				printf("bootpc_call: sosend: %d state %08x\n",
				    error, (int) so->so_state);
			}

			/* XXX: Is this needed ? */
			pause("bootpw", hz/10);

			/* Set netmask to 255.0.0.0 */
			sin = (struct sockaddr_in *) &ifctx->ireq.ifr_addr;
			clear_sinaddr(sin);
			sin->sin_addr.s_addr = htonl(0xff000000u);
			error = ifioctl(ifctx->so, SIOCSIFNETMASK,
			    (caddr_t) &ifctx->ireq, td);
			if (error != 0)
				panic("bootpc_call:"
				    "set if netmask, error=%d", error);
		}

		/* Nothing pending and the settle delay (if any) expired. */
		if (outstanding == 0 &&
		    (rtimo == 0 || time_second >= rtimo)) {
			error = 0;
			goto gotreply;
		}

		/* Determine new timeout. */
		if (timo < MAX_RESEND_DELAY)
			timo++;
		else {
			printf("DHCP/BOOTP timeout for server ");
			print_sin_addr(&dst);
			printf("\n");
		}

		/*
		 * Wait for up to timo seconds for a reply.
		 * The socket receive timeout was set to 1 second.
		 */
		atimo = timo + time_second;
		while (time_second < atimo) {
			aio.iov_base = (caddr_t) &gctx->reply;
			aio.iov_len = sizeof(gctx->reply);

			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(gctx->reply);
			auio.uio_td = td;

			rcvflg = 0;
			error = soreceive(so, NULL, &auio, NULL, NULL,
			    &rcvflg);
			/* Keep the elapsed-seconds field of every pending
			 * request current (RFC 951 "secs"). */
			gctx->secs = time_second - gctx->starttime;
			for (ifctx = gctx->interfaces; ifctx != NULL;
			    ifctx = ifctx->next) {
				if (bootpc_ifctx_isresolved(ifctx) != 0 ||
				    bootpc_ifctx_isfailed(ifctx) != 0)
					continue;
				ifctx->call.secs = htons(gctx->secs);
			}
			if (error == EWOULDBLOCK)
				continue;
			if (error != 0)
				goto out;
			len = sizeof(gctx->reply) - auio.uio_resid;

			/* Do we have the required number of bytes ? */
			if (len < BOOTP_MIN_LEN)
				continue;
			gctx->replylen = len;

			/* Is it a reply? */
			if (gctx->reply.op != BOOTP_REPLY)
				continue;

			/* Is this an answer to our query */
			for (ifctx = gctx->interfaces; ifctx != NULL;
			    ifctx = ifctx->next) {
				if (gctx->reply.xid != ifctx->call.xid)
					continue;

				/* Same HW address size ? */
				if (gctx->reply.hlen != ifctx->call.hlen)
					continue;

				/* Correct HW address ? */
				if (bcmp(gctx->reply.chaddr,
				    ifctx->call.chaddr,
				    ifctx->call.hlen) != 0)
					continue;
				break;
			}

			if (ifctx != NULL) {
				s = bootpc_tag(&gctx->tmptag, &gctx->reply,
				    gctx->replylen, TAG_DHCP_MSGTYPE);
				if (s != NULL) {
					switch (*s) {
					case DHCP_OFFER:
						s = "DHCP Offer";
						break;
					case DHCP_ACK:
						s = "DHCP Ack";
						break;
					default:
						s = "DHCP (unexpected)";
						break;
					}
				} else
					s = "BOOTP Reply";

				printf("Received %s packet"
				    " on %s from ",
				    s, ifctx->ireq.ifr_name);
				print_in_addr(gctx->reply.siaddr);
				if (gctx->reply.giaddr.s_addr !=
				    htonl(INADDR_ANY)) {
					printf(" via ");
					print_in_addr(gctx->reply.giaddr);
				}
				if (bootpc_received(gctx, ifctx) != 0) {
					printf(" (accepted)");
					if (ifctx->outstanding) {
						ifctx->outstanding = 0;
						outstanding--;
					}
					/* Network settle delay */
					if (outstanding == 0)
						atimo = time_second +
						    BOOTP_SETTLE_DELAY;
				} else
					printf(" (ignored)");
				if (ifctx->gotrootpath) {
					gotrootpath = 1;
					rtimo = time_second +
					    BOOTP_SETTLE_DELAY;
					printf(" (got root path)");
				} else
					printf(" (no root path)");
				printf("\n");
			}
		} /* while secs */
#ifdef BOOTP_TIMEOUT
		if (gctx->secs > BOOTP_TIMEOUT && BOOTP_TIMEOUT > 0)
			break;
#endif
		/* Force a retry if halfway in DHCP negotiation */
		retry = 0;
		for (ifctx = gctx->interfaces; ifctx != NULL;
		    ifctx = ifctx->next) {
			if (ifctx->state == IF_DHCP_OFFERED) {
				if (ifctx->dhcpquerytype == DHCP_DISCOVER)
					retry = 1;
				else
					ifctx->state = IF_DHCP_UNRESOLVED;
			}
		}

		if (retry != 0)
			continue;

		if (gotrootpath != 0) {
			gctx->gotrootpath = gotrootpath;
			if (rtimo != 0 && time_second >= rtimo)
				break;
		}
	} /* forever send/receive */

	/*
	 * XXX: These are errors of varying seriousness being silently
	 * ignored
	 */
	for (ifctx = gctx->interfaces; ifctx != NULL; ifctx = ifctx->next) {
		if (bootpc_ifctx_isresolved(ifctx) == 0) {
			printf("%s timeout for interface %s\n",
			    ifctx->dhcpquerytype != DHCP_NOMSG ?
			    "DHCP" : "BOOTP",
			    ifctx->ireq.ifr_name);
		}
	}
	if (gctx->gotrootpath != 0) {
#if 0
		printf("Got a root path, ignoring remaining timeout\n");
#endif
		error = 0;
		goto out;
	}
#ifndef BOOTP_NFSROOT
	for (ifctx = gctx->interfaces; ifctx != NULL; ifctx = ifctx->next) {
		if (bootpc_ifctx_isresolved(ifctx) != 0) {
			error = 0;
			goto out;
		}
	}
#endif
	error = ETIMEDOUT;
	goto out;

gotreply:
out:
	soclose(so);
out0:
	return error;
}
/*
 * Execute one queued socket AIO request (read or write, chosen by which
 * sockbuf `sb` is) on behalf of the owning process.
 *
 * Called with `sb` locked; unlocks it immediately and performs the I/O in
 * non-blocking mode (MSG_NBIO) under the job's credentials and vmspace.
 * On EWOULDBLOCK the request is either retried (if the socket became ready
 * again) or re-queued at the head of sb_aiojobq; otherwise the job is
 * completed with the byte count and error.
 *
 * NOTE(review): lock state is asymmetric by design — the caller's lock is
 * dropped here and re-taken on the EWOULDBLOCK/complete paths, so callers
 * must expect `sb` locked on return.
 */
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	/* Run in the submitting process's address space: aio_buf is a
	 * user pointer (UIO_USERSPACE below). */
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	/* Impersonate the job's credentials for the duration of the I/O. */
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	cnt = job->uaiocb.aio_nbytes;
	iov.iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/* TODO: Charge ru_msg* to job. */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
	} else {
		uio.uio_rw = UIO_WRITE;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		/* Mirror write(2) semantics: raise SIGPIPE on EPIPE unless
		 * the socket opted out. */
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	/* Bytes actually transferred. */
	cnt -= uio.uio_resid;
	td->td_ucred = td_savedcred;

	/* XXX: Not sure if this is needed? */
	/* Partial transfer before an interrupt/short-block counts as
	 * success, reporting the partial count. */
	if (cnt != 0 &&
	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
		error = 0;
	if (error == EWOULDBLOCK) {
		/*
		 * A read() or write() on the socket raced with this
		 * request.  If the socket is now ready, try again.
		 * If it is not, place this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		SOCKBUF_LOCK(sb);
		/* NOTE(review): empty_results/empty_retries appear to be
		 * file-scope statistics counters defined elsewhere. */
		empty_results++;
		if (soaio_ready(so, sb)) {
			empty_retries++;
			SOCKBUF_UNLOCK(sb);
			goto retry;
		}

		if (!aio_set_cancel_function(job, soo_aio_cancel)) {
			/* Job was cancelled while we were running it. */
			MPASS(cnt == 0);
			SOCKBUF_UNLOCK(sb);
			aio_cancel(job);
			SOCKBUF_LOCK(sb);
		} else {
			TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
		}
	} else {
		aio_complete(job, cnt, error);
		SOCKBUF_LOCK(sb);
	}
}
/*
 * Receive one datagram RPC request on transport `xprt`.
 *
 * On success, decodes the RPC call header into `msg`, hands the sender's
 * address to the caller via *addrp (caller owns/frees it) and the request
 * body via *mp, and returns TRUE.  Returns FALSE when no packet is ready
 * (the transport is marked inactive) or on a hard receive/decode error.
 */
static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct uio uio;
	struct sockaddr *raddr;
	struct mbuf *mreq;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket.
	 */
	sx_xlock(&xprt->xp_lock);

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to read a
	 * packet from the socket and process it. If the read fails,
	 * we have drained all pending requests so we call
	 * xprt_inactive().
	 */
	/* Effectively unlimited: accept whatever single datagram is queued. */
	uio.uio_resid = 1000000000;
	uio.uio_td = curthread;
	mreq = NULL;
	rcvflag = MSG_DONTWAIT;
	error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL,
	    &rcvflag);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for readability after taking the
		 * lock to protect us in the case where a new packet
		 * arrives on the socket after our call to soreceive
		 * fails with EWOULDBLOCK. The pool lock protects us
		 * from racing the upcall after our soreadable() call
		 * returns false.
		 */
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (!soreadable(xprt->xp_socket))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/* Hard error: stop listening for upcalls on this socket. */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		soupcall_clear(xprt->xp_socket, SO_RCV);
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	/* Decode the RPC call header straight out of the mbuf chain. */
	xdrmbuf_create(&xdrs, mreq, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}

	*addrp = raddr;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);

	return (TRUE);
}
/*
 * Receive one datagram from the Rx socket `so` into the caller's iovec
 * array `dvec` (nvecs entries, at most RX_MAXIOVECS).
 *
 * On entry *alength is the buffer capacity; on success it is updated to
 * the number of bytes actually received.  If `addr` is non-NULL and the
 * sender is AF_INET, the sender's address is copied out.
 *
 * Returns 0 on success, -1 if the listener is shutting down, or the
 * error from the receive path.
 *
 * Fixes over the previous version:
 *  - the M_SONAME sockaddr that soreceive() can allocate is now freed on
 *    the error-return path too (it used to leak on every failed receive);
 *  - `m` is initialized to NULL so mbuf_freem() is safe even when
 *    sock_receivembuf() fails before assigning a chain.
 */
int
osi_NetReceive(osi_socket so, struct sockaddr_in *addr, struct iovec *dvec,
	       int nvecs, int *alength)
{
#ifdef AFS_DARWIN80_ENV
    socket_t asocket = (socket_t)so;
    struct msghdr msg;
    struct sockaddr_storage ss;
    mbuf_t m = NULL;	/* mbuf_freem(NULL) is a no-op, so this is safe
			 * even if sock_receivembuf() fails early */
#else
    struct socket *asocket = (struct socket *)so;
    struct uio u;
#endif
    int i;
    struct iovec iov[RX_MAXIOVECS];
    struct sockaddr *sa = NULL;
    int code;
    size_t resid;
    int haveGlock = ISAFS_GLOCK();

    /*AFS_STATCNT(osi_NetReceive); */

    if (nvecs > RX_MAXIOVECS)
	osi_Panic("osi_NetReceive: %d: Too many iovecs.\n", nvecs);

    for (i = 0; i < nvecs; i++)
	iov[i] = dvec[i];

    /* Bail out quietly if the Rx listener is being torn down. */
    if ((afs_termState == AFSOP_STOP_RXK_LISTENER) ||
	(afs_termState == AFSOP_STOP_COMPLETE))
	return -1;

    if (haveGlock)
	AFS_GUNLOCK();
#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
#endif
#ifdef AFS_DARWIN80_ENV
    resid = *alength;
    memset(&msg, 0, sizeof(struct msghdr));
    msg.msg_name = &ss;
    msg.msg_namelen = sizeof(struct sockaddr_storage);
    sa = (struct sockaddr *) &ss;	/* stack storage; never freed */
    code = sock_receivembuf(asocket, &msg, &m, 0, alength);
    if (!code) {
	/* Scatter the received mbuf chain into the caller's iovecs. */
	size_t offset = 0, sz;
	resid = *alength;
	for (i = 0; i < nvecs && resid; i++) {
	    sz = MIN(resid, iov[i].iov_len);
	    code = mbuf_copydata(m, offset, sz, iov[i].iov_base);
	    if (code)
		break;
	    resid -= sz;
	    offset += sz;
	}
    }
    mbuf_freem(m);
#else
    u.uio_iov = &iov[0];
    u.uio_iovcnt = nvecs;
    u.uio_offset = 0;
    u.uio_resid = *alength;
    u.uio_segflg = UIO_SYSSPACE;
    u.uio_rw = UIO_READ;
    u.uio_procp = NULL;
    code = soreceive(asocket, &sa, &u, NULL, NULL, NULL);
    resid = u.uio_resid;
#endif
#if defined(KERNEL_FUNNEL)
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
#endif
    if (haveGlock)
	AFS_GLOCK();

    if (code) {
#ifndef AFS_DARWIN80_ENV
	/*
	 * soreceive() may have allocated the source address even when it
	 * returns an error; release it instead of leaking it.
	 */
	if (sa)
	    FREE(sa, M_SONAME);
#endif
	return code;
    }

    *alength -= resid;
    if (sa) {
	if (sa->sa_family == AF_INET) {
	    if (addr)
		*addr = *(struct sockaddr_in *)sa;
	} else
	    printf("Unknown socket family %d in NetReceive\n",
		   sa->sa_family);
#ifndef AFS_DARWIN80_ENV
	FREE(sa, M_SONAME);
#endif
    }
    return code;
}
/*
 * Send the plain-iovec fragments of `tx` on the connection's socket.
 * Returns bytes sent (>= 0, possibly partial) or a negative errno.
 */
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        /* per-scheduler scratch array avoids a per-send allocation */
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = tx->tx_niov;
#endif
        struct socket *sock = conn->ksnc_sock;
        int            nob;
        int            rc;
        int            i;
        struct  uio suio = {
                .uio_iov        = scratchiov,
                .uio_iovcnt     = niov,
                .uio_offset     = 0,
                .uio_resid      = 0,    /* This will be valued after a while */
                .uio_segflg     = UIO_SYSSPACE,
                .uio_rw         = UIO_WRITE,
                .uio_procp      = NULL
        };
        int  flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = tx->tx_iov[i];
                nob += scratchiov[i].iov_len;
        }
        suio.uio_resid = nob;

        CFS_NET_IN;
        rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0,
                    flags);
        CFS_NET_EX;

        /* NB there is no return value can indicate how many
         * have been sent and how many resid, we have to get
         * sent bytes from suio. */
        if (rc != 0) {
                if (suio.uio_resid != nob &&
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* We have sent something */
                        rc = nob - suio.uio_resid;
                else if ( rc == EWOULDBLOCK )
                        /* Actually, EAGAIN and EWOULDBLOCK have same value
                         * in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else  /* rc == 0 */
                rc = nob - suio.uio_resid;

        return rc;
}

/*
 * Send the page (kiov) fragments of `tx`, kmapping each page for the
 * duration of the sosend().  Same return convention as send_iov above.
 */
int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = tx->tx_nkiov;
#endif
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t   *kiov = tx->tx_kiov;
        int            nob;
        int            rc;
        int            i;
        struct  uio suio = {
                .uio_iov        = scratchiov,
                .uio_iovcnt     = niov,
                .uio_offset     = 0,
                .uio_resid      = 0,    /* It should be valued after a while */
                .uio_segflg     = UIO_SYSSPACE,
                .uio_rw         = UIO_WRITE,
                .uio_procp      = NULL
        };
        int  flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
                                         kiov[i].kiov_offset;
                nob += scratchiov[i].iov_len = kiov[i].kiov_len;
        }
        suio.uio_resid = nob;

        CFS_NET_IN;
        rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0,
                    flags);
        CFS_NET_EX;

        /* unmap the pages regardless of the send outcome */
        for (i = 0; i < niov; i++)
                cfs_kunmap(kiov[i].kiov_page);

        if (rc != 0) {
                if (suio.uio_resid != nob &&
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* We have sent something */
                        rc = nob - suio.uio_resid;
                else if ( rc == EWOULDBLOCK )
                        /* EAGAIN and EWOULDBLOCK have same value in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else  /* rc == 0 */
                rc = nob - suio.uio_resid;

        return rc;
}

/*
 * liang: Hack of inpcb and tcpcb.
 * To get tcpcb of a socket, and call tcp_output
 * to send quick ack.
 *
 * NOTE(review): these shadow structs mirror only the leading fields of the
 * kernel's private inpcb/tcpcb layouts; they break silently if the kernel
 * layout changes.
 */
struct ks_tseg_qent{
        int foo;
};

struct ks_tcptemp{
        int foo;
};

LIST_HEAD(ks_tsegqe_head, ks_tseg_qent);

struct ks_tcpcb {
        struct ks_tsegqe_head t_segq;
        int     t_dupacks;
        struct ks_tcptemp *unused;
        int    t_timer[4];
        struct inpcb *t_inpcb;
        int    t_state;
        u_int  t_flags;
        /*
         * There are more fields but we don't need
         * ......
         */
};

#define TF_ACKNOW       0x00001
#define TF_DELACK       0x00002

struct ks_inpcb {
        LIST_ENTRY(ks_inpcb) inp_hash;
        struct in_addr reserved1;
        struct in_addr reserved2;
        u_short inp_fport;
        u_short inp_lport;
        LIST_ENTRY(inpcb) inp_list;
        caddr_t inp_ppcb;
        /*
         * There are more fields but we don't need
         * ......
         */
};

#define ks_sotoinpcb(so)   ((struct ks_inpcb *)(so)->so_pcb)
#define ks_intotcpcb(ip)   ((struct ks_tcpcb *)(ip)->inp_ppcb)
#define ks_sototcpcb(so)   (intotcpcb(sotoinpcb(so)))

/*
 * Force an immediate ACK on the connection by clearing the delayed-ACK
 * flag and calling tcp_output() directly (no TCP_QUICKACK on BSD).
 */
void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
        struct socket    *sock = conn->ksnc_sock;
        struct ks_inpcb  *inp = ks_sotoinpcb(sock);
        struct ks_tcpcb  *tp  = ks_intotcpcb(inp);
        int               s;
        CFS_DECL_NET_DATA;
        extern int tcp_output(register struct ks_tcpcb *tp);

        CFS_NET_IN;
        s = splnet();

        /*
         * No TCP_QUICKACK supported in BSD, so I have to call tcp_fasttimo
         * to send immediate ACK.
         */
        if (tp && tp->t_flags & TF_DELACK){
                tp->t_flags &= ~TF_DELACK;
                tp->t_flags |= TF_ACKNOW;
                (void) tcp_output(tp);
        }
        splx(s);

        CFS_NET_EX;
        return;
}

/*
 * Receive into the connection's plain-iovec fragments.
 * Returns bytes received (>= 0, possibly partial) or a negative errno.
 */
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        int           nob;
        int           rc;
        int           i;
        struct uio  ruio = {
                .uio_iov        = scratchiov,
                .uio_iovcnt     = niov,
                .uio_offset     = 0,
                .uio_resid      = 0,    /* It should be valued after a while */
                .uio_segflg     = UIO_SYSSPACE,
                .uio_rw         = UIO_READ,
                .uio_procp      = NULL
        };
        int         flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        ruio.uio_resid = nob;

        CFS_NET_IN;
        rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio,
                       (struct mbuf **)0, (struct mbuf **)0, &flags);
        CFS_NET_EX;

        if (rc){
                if (ruio.uio_resid != nob &&
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK ||
                     rc == EAGAIN))
                        /* data partially received */
                        rc = nob - ruio.uio_resid;
                else if (rc == EWOULDBLOCK)
                        /* EAGAIN and EWOULDBLOCK have same value in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else
                rc = nob - ruio.uio_resid;

        return (rc);
}

/*
 * Receive into the connection's page (kiov) fragments, kmapping each page
 * around the soreceive().  Same return convention as recv_iov above.
 */
int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
        int            nob;
        int            rc;
        int            i;
        struct uio  ruio = {
                .uio_iov        = scratchiov,
                .uio_iovcnt     = niov,
                .uio_offset     = 0,
                .uio_resid      = 0,
                .uio_segflg     = UIO_SYSSPACE,
                .uio_rw         = UIO_READ,
                .uio_procp      = NULL
        };
        int         flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
                                         kiov[i].kiov_offset;
                nob += scratchiov[i].iov_len = kiov[i].kiov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        ruio.uio_resid = nob;

        CFS_NET_IN;
        rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio,
                       (struct mbuf **)0, NULL, &flags);
        CFS_NET_EX;

        /* unmap the pages regardless of the receive outcome */
        for (i = 0; i < niov; i++)
                cfs_kunmap(kiov[i].kiov_page);

        if (rc){
                if (ruio.uio_resid != nob &&
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* data partially received */
                        rc = nob - ruio.uio_resid;
                else if (rc == EWOULDBLOCK)
                        /* receive blocked, EWOULDBLOCK == EAGAIN */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else
                rc = nob - ruio.uio_resid;

        return (rc);
}

/*
 * Report the connection's socket buffer sizes and (inverted) Nagle
 * setting.  On error all three outputs are zeroed.
 */
int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem,
                                int *nagle)
{
        struct socket *sock = conn->ksnc_sock;
        int            rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT (conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }
        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                struct sockopt  sopt;
                int             len;
                CFS_DECL_NET_DATA;

                len = sizeof(*nagle);
                bzero(&sopt, sizeof sopt);
                sopt.sopt_dir = SOPT_GET;
                sopt.sopt_level = IPPROTO_TCP;
                sopt.sopt_name = TCP_NODELAY;
                sopt.sopt_val = nagle;
                sopt.sopt_valsize = len;

                CFS_NET_IN;
                rc = -sogetopt(sock, &sopt);
                CFS_NET_EX;
        }
        ksocknal_connsock_decref(conn);

        /* TCP_NODELAY set means Nagle is OFF, hence the inversion */
        if (rc == 0)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;

        return (rc);
}

/*
 * Apply the socknal tunables to a freshly created socket: buffer sizes,
 * SO_LINGER off, optional TCP_NODELAY, and keepalive settings.
 * Returns 0 or a negative errno.
 */
int
ksocknal_lib_setup_sock (struct socket *so)
{
        struct sockopt  sopt;
        int             rc;
        int             option;
        int             keep_idle;
        int             keep_intvl;
        int             keep_count;
        int             do_keepalive;
        struct linger   linger;
        CFS_DECL_NET_DATA;

        rc = libcfs_sock_setbuf(so,
                                *ksocknal_tunables.ksnd_tx_buffer_size,
                                *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc != 0) {
                CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
                        *ksocknal_tunables.ksnd_tx_buffer_size,
                        *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return (rc);
        }

        /* Ensure this socket aborts active sends immediately when we close
         * it. */
        bzero(&sopt, sizeof sopt);
        linger.l_onoff = 0;
        linger.l_linger = 0;
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_name = SO_LINGER;
        sopt.sopt_val = &linger;
        sopt.sopt_valsize = sizeof(linger);

        CFS_NET_IN;
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set SO_LINGER: %d\n", rc);
                goto out;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;
                bzero(&sopt, sizeof sopt);
                sopt.sopt_dir = SOPT_SET;
                sopt.sopt_level = IPPROTO_TCP;
                sopt.sopt_name = TCP_NODELAY;
                sopt.sopt_val = &option;
                sopt.sopt_valsize = sizeof(option);
                rc = -sosetopt(so, &sopt);
                if (rc != 0) {
                        CERROR ("Can't disable nagle: %d\n", rc);
                        goto out;
                }
        }

        /* snapshot tunables */
        keep_idle  = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
        option = (do_keepalive ? 1 : 0);

        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_name = SO_KEEPALIVE;
        sopt.sopt_val = &option;
        sopt.sopt_valsize = sizeof(option);
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
                goto out;
        }

        if (!do_keepalive) {
                /* no more setting, just return */
                rc = 0;
                goto out;
        }

        /* NOTE(review): only the idle time is applied here; keep_count and
         * keep_intvl are used solely to decide whether keepalive is on
         * (Darwin's TCP_KEEPALIVE takes the idle time only). */
        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = IPPROTO_TCP;
        sopt.sopt_name = TCP_KEEPALIVE;
        sopt.sopt_val = &keep_idle;
        sopt.sopt_valsize = sizeof(keep_idle);
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPALIVE : %d\n", rc);
                goto out;
        }
out:
        CFS_NET_EX;
        return (rc);
}

/*
 * Nudge the connection's TCP to flush anything buffered by toggling
 * TCP_NODELAY on; errors are deliberately ignored (best effort).
 */
void
ksocknal_lib_push_conn(ksock_conn_t *conn)
{
        struct socket   *sock;
        struct sockopt  sopt;
        int             val = 1;
        int             rc;
        CFS_DECL_NET_DATA;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)            /* being shut down */
                return;
        sock = conn->ksnc_sock;
        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = IPPROTO_TCP;
        sopt.sopt_name = TCP_NODELAY;
        sopt.sopt_val = &val;
        sopt.sopt_valsize = sizeof val;

        CFS_NET_IN;
        sosetopt(sock, &sopt);
        CFS_NET_EX;

        ksocknal_connsock_decref(conn);
        return;
}

extern void ksocknal_read_callback (ksock_conn_t *conn);
extern void ksocknal_write_callback (ksock_conn_t *conn);

/*
 * Socket upcall: dispatch readable/writable events on `so` to the socknal
 * read/write callbacks for the connection stashed in `arg`.
 * Runs under the global connection read-lock.
 */
static void
ksocknal_upcall(struct socket *so, caddr_t arg, int waitf)
{
        ksock_conn_t  *conn = (ksock_conn_t *)arg;
        ENTRY;

        read_lock (&ksocknal_data.ksnd_global_lock);
        if (conn == NULL)
                goto out;

        if (so->so_rcv.sb_flags & SB_UPCALL) {
                extern int soreadable(struct socket *so);
                if (conn->ksnc_rx_nob_wanted && soreadable(so))
                        /* To verify whether the upcall is for receive */
                        ksocknal_read_callback (conn);
        }
        /* go forward? */
        if (so->so_snd.sb_flags & SB_UPCALL){
                extern int sowriteable(struct socket *so);
                if (sowriteable(so))
                        /* socket is writable */
                        ksocknal_write_callback(conn);
        }
out:
        read_unlock (&ksocknal_data.ksnd_global_lock);
        EXIT;
}

void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        /* No callback need to save in osx */
        return;
}

/*
 * Install ksocknal_upcall as the socket's upcall for both send and
 * receive buffers and arm the SB_UPCALL flags.
 */
void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        CFS_DECL_NET_DATA;

        CFS_NET_IN;
        sock->so_upcallarg = (void *)conn;      /* anchor for upcalls */
        sock->so_upcall = ksocknal_upcall;
        sock->so_snd.sb_timeo = 0;
        sock->so_rcv.sb_timeo = cfs_time_seconds(2);
        sock->so_rcv.sb_flags |= SB_UPCALL;
        sock->so_snd.sb_flags |= SB_UPCALL;
        CFS_NET_EX;
        return;
}

/*
 * Synthesize an upcall for the connection (used to kick processing when
 * no real socket event is pending).
 */
void
ksocknal_lib_act_callback(struct socket *sock, ksock_conn_t *conn)
{
        CFS_DECL_NET_DATA;

        CFS_NET_IN;
        ksocknal_upcall (sock, (void *)conn, 0);
        CFS_NET_EX;
}
static int bsd_read(struct CYG_FILE_TAG *fp, struct CYG_UIO_TAG *uio) { return (soreceive((struct socket *)fp->f_data, (struct sockaddr **)0, uio, (struct mbuf **)0, (struct mbuf **)0, (int *)0)); }
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this event some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guaranteed to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct mbuf *m;
	struct ng_mesg *response;
	struct uio auio;
	int s, flags, error;

	s = splnet();

	/* so = priv->so; */	/* XXX could have derived this like so */
	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			soclrstate(so, SS_ISCONNECTING);
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			/* Connect finished (with `error` as its outcome):
			 * notify whoever requested it. */
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t),
			    M_WAITOK | M_NULLOK);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL) {
		splx(s);
		return;
	}

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	/* effectively unlimited: take whatever is queued */
	auio.uio_resid = 1000000000;
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *n;

		/* Try to get next packet from socket; only ask for the
		 * peer address on unconnected (datagram-style) sockets. */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				kfree(sa, M_SONAME);
			break;
		}

		/*
		 * Don't trust the various socket layers to get the
		 * packet header and length correct (e.g. kern/15175).
		 *
		 * Also, do not trust that soreceive() will clear m_nextpkt
		 * for us (e.g. kern/84952, kern/82413).
		 */
		m->m_pkthdr.csum_flags = 0;
		for (n = m, m->m_pkthdr.len = 0; n != NULL; n = n->m_next) {
			m->m_pkthdr.len += n->m_len;
			n->m_nextpkt = NULL;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag	*stag;

			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) +
			    sa->sa_len, MB_DONTWAIT);
			if (stag == NULL) {
				/* Tag allocation failed: forward the data
				 * without the address rather than drop it. */
				kfree(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			kfree(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m != NULL) {
			m->m_len = m->m_pkthdr.len = 0;
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		}
		priv->flags |= KSF_EOFSEEN;
	}
	splx(s);
}