/*
 * -------------------------------------------------------------------------
 *
 * M_FLUSH Handling
 *
 * -------------------------------------------------------------------------
 */
/*
 * sscop_w_flush - canonical M_FLUSH handling for a write queue.
 *
 * Flush the write side if addressed, pass downstream when a lower queue
 * exists (module), otherwise clear FLUSHW and, if FLUSHR is set, flush the
 * read side and turn the message around with qreply() (driver at the end
 * of the stream).
 *
 * FIX: the downstream test was written `if (q - q_next)` (subtracting an
 * undeclared identifier from the queue pointer) instead of the intended
 * member access `if (q->q_next)`.
 */
STATIC inline void
sscop_w_flush(queue_t *q, mblk_t *mp)
{
	if (*mp->b_rptr & FLUSHW) {
		if (*mp->b_rptr & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
		if (q->q_next) {
			/* module: let the driver below complete the flush */
			putnext(q, mp);
			return;
		}
		*mp->b_rptr &= ~FLUSHW;
	}
	if (*mp->b_rptr & FLUSHR) {
		if (*mp->b_rptr & FLUSHBAND)
			flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
		else
			flushq(RD(q), FLUSHDATA);
		/* reflect the flush back upstream on the read side */
		qreply(q, mp);
		return;
	}
	if (q->q_next) {
		putnext(q, mp);
		return;
	}
}
/*
 * ch_m_flush - pipemod-style M_FLUSH processing.
 *
 * Flush this queue when the message addresses our side (read or write,
 * depending on which queue we are), then reverse the flush sense (swap
 * FLUSHR and FLUSHW) before passing the message along, as pipemod does at
 * the mid-point of a pipe.
 */
static fastcall __unlikely int
ch_m_flush(queue_t *q, mblk_t *mp)
{
	const int myside = (q->q_flag & QREADR) ? FLUSHR : FLUSHW;
	const unsigned char flags = mp->b_rptr[0];

	if (flags & myside) {
		if (flags & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
	}
	/* sense reversal: FLUSHR <-> FLUSHW; FLUSHRW is symmetric */
	switch (flags & FLUSHRW) {
	case FLUSHR:
		mp->b_rptr[0] = (flags & ~FLUSHRW) | FLUSHW;
		break;
	case FLUSHW:
		mp->b_rptr[0] = (flags & ~FLUSHRW) | FLUSHR;
		break;
	default:
		/* FLUSHRW (or neither flag): unchanged by reversal */
		break;
	}
	putnext(q, mp);
	return (0);
}
/*
 * M_FLUSH Handling
 * -------------------------------------------------------------------------
 */
/*
 * spm_m_flush - flush this queue if the M_FLUSH message addresses it.
 * @q: the queue to flush
 * @mp: the M_FLUSH message
 * @mflag: FLUSHR or FLUSHW, whichever applies to @q
 *
 * Always returns QR_LOOP so the framework turns the message around.
 */
STATIC INLINE int
spm_m_flush(queue_t *q, mblk_t *mp, const uint8_t mflag)
{
	const unsigned char flags = *mp->b_rptr;

	if (flags & mflag) {
		if (flags & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
	}
	return (QR_LOOP);
}
/*
 * M_FLUSH Processing
 * -------------------------------------------------------------------------
 */
/*
 * xxx_r_flush - M_FLUSH processing for the read side.
 *
 * Flush the read side, then loop the write-side flush back down unless the
 * MSGNOLOOP marker shows we have already done so.
 *
 * Fixes:
 * - After qreply(q, mp) the message belongs downstream; the old code fell
 *   through to freemsg(mp), a double free.  Return instead.
 * - The loop-marker was tested as mp->b_flags but set as mp->b_flag; the
 *   STREAMS message flag member is b_flag — use it consistently.
 */
static int
xxx_r_flush(queue_t *q, mblk_t *mp)
{
	if (*mp->b_rptr & FLUSHR) {
		if (*mp->b_rptr & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushall(q, FLUSHDATA);
		*mp->b_rptr &= ~FLUSHR;
	}
	if ((*mp->b_rptr & FLUSHW) && !(mp->b_flag & MSGNOLOOP)) {
		if (*mp->b_rptr & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushall(q, FLUSHDATA);
		mp->b_flag |= MSGNOLOOP;
		qreply(q, mp);	/* flush all the way back down */
		return (0);	/* mp now belongs downstream; do not free it */
	}
	freemsg(mp);
	return (0);
}
static fastcall zap_r_flush(queue_t *q, mblk_t *mp) { /* canonical flush for driver read queue */ if (mp->b_rptr[0] & FLUSHR) { if (mp->b_rptr[0] & FLUSHBAND) flushband(q, mp->b_rptr[1], FLUSHDATA); else flushq(q, FLUSHDATA); mp->b_rptr[0] &= ~FLUSHR; } if (mp->b_rptr[0] & FLUSHW) { if (mp->b_rptr[0] & FLUSHBAND) flushband(_WR(q), mp->b_rptr[1], FLUSHDATA); else flushq(_WR(q), FLUSHDATA); qreply(q, mp); return (0); } freemsg(mp); return (0); }
/*
 * spx_rput - read-side put procedure.
 *
 * Performs the read-side portion of canonical M_FLUSH handling, then
 * forwards every message (including the M_FLUSH itself) upstream.
 */
static streamscall int
spx_rput(queue_t *q, mblk_t *mp)
{
	if (mp->b_datap->db_type == M_FLUSH && (mp->b_rptr[0] & FLUSHR)) {
		if (mp->b_rptr[0] & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
	}
	putnext(q, mp);
	return (0);
}
/*
 * srvmod_rput - read-side put procedure.
 *
 * Handles the read-side half of M_FLUSH, then either passes the message
 * directly upstream (priority messages, or an empty idle queue with room
 * above) or queues it for the service procedure.
 */
STATIC streamscall int
srvmod_rput(queue_t *q, mblk_t *mp)
{
	const unsigned char type = mp->b_datap->db_type;

	if (unlikely(type == M_FLUSH) && (mp->b_rptr[0] & FLUSHR)) {
		if (mp->b_rptr[0] & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
	}
	/* fast path: priority messages bypass flow control; otherwise only
	   when nothing is queued, the service routine is idle, and the
	   upstream queue can accept this band */
	if (likely(type >= QPCTL
		   || (q->q_first == NULL && !(q->q_flag & QSVCBUSY)
		       && bcanputnext(q, mp->b_band)))) {
		putnext(q, mp);
		return (0);
	}
	if (unlikely(putq(q, mp) == 0)) {
		/* no band structure available: band 0 cannot fail */
		mp->b_band = 0;
		putq(q, mp);	/* this must succeed */
	}
	return (0);
}
/*
 * srvmod_wput - write-side put procedure.
 *
 * Handles the write-side half of M_FLUSH, then either passes the message
 * directly downstream (priority messages, or an empty idle queue with room
 * below) or queues it so the service procedure runs (deliberately buffered
 * for testing the out-of-service path).
 */
STATIC streamscall int
srvmod_wput(queue_t *q, mblk_t *mp)
{
	const unsigned char type = mp->b_datap->db_type;

	if (unlikely(type == M_FLUSH) && (mp->b_rptr[0] & FLUSHW)) {
		if (mp->b_rptr[0] & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHDATA);
		else
			flushq(q, FLUSHDATA);
	}
	if (likely(type >= QPCTL
		   || (q->q_first == NULL && !(q->q_flag & QSVCBUSY)
		       && bcanputnext(q, mp->b_band)))) {
		putnext(q, mp);
		return (0);
	}
	/* always buffer, always schedule out of service procedure for testing */
	if (unlikely(putq(q, mp) == 0)) {
		/* no band structure available: band 0 cannot fail */
		mp->b_band = 0;
		putq(q, mp);	/* this must succeed */
	}
	return (0);
}
/*
 * ip2xinet_lrput - lower read-side put procedure.
 * @q: lower read queue (linked under us via I_LINK)
 * @mp: message from xinet below
 *
 * Tracks the DLPI state machine for the link to xinet (ACKs and error
 * ACKs), converts DL_UNITDATA_IND payloads into sk_buffs handed to the IP
 * layer, and routes ioctl replies and hangups to the upper read queue.
 * Runs entirely under ip2xinet_lock.
 */
STATIC streamscall int
ip2xinet_lrput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	union DL_primitives *dp;
	struct ip2xinet_priv *privptr;
	struct net_device *dev;
	int i;

	spin_lock(&ip2xinet_lock);
	/* use the first open ip device */
	for (i = 0; i < NUMIP2XINET; i++) {
		privptr = &ip2xinet_devs[i].priv;
		if (privptr->state == 1)
			break;
	}
	if (i == NUMIP2XINET)
		i = 0;	/* All devices closed, pick the 1st one */
	/* send data up to ip through the 1st open device */
	dev = &ip2xinet_devs[i].dev;
	switch (mp->b_datap->db_type) {
	case M_CTL:
		freemsg(mp);
		break;
	case M_DATA:
		/* NOTE: We don't expect any M_DATA messages from xinet */
		freemsg(mp);
		break;
	case M_PROTO:
	case M_PCPROTO:
		dp = (union DL_primitives *) mp->b_rptr;
#if 0
#ifdef DEBUG
		printk("ip2xinet_lrput: %s size=%d\n", x25dbdlpmsg(dp->dl_primitive),
		       x25dbmsgsize(mp));
#endif
#endif
		switch (dp->dl_primitive) {
		case DL_BIND_ACK:
			/* if we're in in BNDPND and receive a BIND_ACK we go to IDLE */
			ip2xinet_status.ip2x_dlstate = DL_IDLE;
			/* If we're DL_IDLE, then dev is open and the kernel can transmit */
			for (i = 0; i < NUMIP2XINET; i++) {
				privptr = &ip2xinet_devs[i].priv;
				if (privptr->state == 1)
					netif_start_queue(&(ip2xinet_devs[i].dev));
			}
			freemsg(mp);	/* Frees bind_ack no longer needed */
			break;
		case DL_INFO_ACK:
			/* NOTE: currently we don't send info_req to xinet */
			freemsg(mp);
			break;
		case DL_ERROR_ACK:
			/* back out of whichever pending state the failed request
			   had put us in */
			switch (ip2xinet_status.ip2x_dlstate) {
			case DL_ATTACH_PENDING:
				/* if we receive ERROR_ACK and we're in ATTACH_PEND go into
				   UNATTACHED */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;
			case DL_BIND_PENDING:
				/* if we're in BNDPND and receive an ERR ack we go to UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;
			case DL_UNBIND_PENDING:
				/* If we're in UNBIND_PEND and we receive ERROR_ACK we go into
				   IDLE */
				ip2xinet_status.ip2x_dlstate = DL_IDLE;
				freemsg(mp);
				break;
			case DL_DETACH_PENDING:
				/* If we're in DETACH_PEND and receive and ERROR_ACK we go into
				   UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;
			default:
				freemsg(mp);
				break;
			}
			break;
		case DL_UNITDATA_IND:
			/* if we're in IDLE we can get DL_UNITDATA_IND with data and call the
			   guy who would normally receive data from interrupt handler. */
			/* Check state: can't transmit if dev is closed :-) Note: we have to
			   check both the dlpi state and dev->start because during a close the
			   DLPI state could remain DL_IDLE if we couldn't allocate mblk for
			   UNBIND_REQ.  There are many ways in which the dev->start could be 1
			   but dlpi state - not DL_IDLE. */
			if (ip2xinet_status.ip2x_dlstate == DL_IDLE && privptr->state == 1) {
				mblk_t *newmp;
				unsigned char *buf;
				int len, tmplen;
				struct ethhdr *eth;
				struct sk_buff *skb;

				/* discard the control block, keep the data chain */
				newmp = unlinkb(mp);
				freemsg(mp);
				mp = newmp;
				/* 1st pass through. figure out the len */
				for (len = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont)
					len += (newmp->b_wptr - newmp->b_rptr);
				/* ALLOCATE skb of length len+2, COPY from mp chain to skb */
				skb = dev_alloc_skb(len + 2);
				if (!skb) {
					printk("ip2xinet rx: failed to allocate an skb\n");
					freemsg(mp);
					break;
				}
				skb_reserve(skb, 2);	/* align IP on 16B boundary */
				/* The packet has been retrieved from the transmission medium.
				   Build an skb around it, so upper layers can handle it */
				buf = skb_put(skb, len);
				/* 2nd pass: copy payload after the fabricated ethernet header */
				for (newmp = mp, tmplen = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont) {
					bcopy(newmp->b_rptr, buf + tmplen,
					      newmp->b_wptr - newmp->b_rptr);
					tmplen += (newmp->b_wptr - newmp->b_rptr);
				}
				eth = (struct ethhdr *) buf;
				/* I am not sure it's necessary, but just in case... */
				memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
				memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
				/* NOTE(review): 0x8 is htons(ETH_P_IP) only on little-endian
				   hosts — confirm this is intentional */
				eth->h_proto = 0x8;	/* ETH_P_IP in network order */
				eth->h_source[ETH_ALEN - 1] ^= 0x01;	/* say src is us xor 1 */
				/* send it to ip2xinet_rx for handling */
				ip2xinet_rx(dev, skb);
			}
			freemsg(mp);
			break;
		case DL_UDERROR_IND:
			freemsg(mp);
			break;
		case DL_OK_ACK:
			switch (dp->ok_ack.dl_correct_primitive) {
			case DL_ATTACH_REQ:
				/* if we're in ATTACH_PEND and we received OK_ACK1 change state
				   to UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				/* We just completed building up the X.25 stack below us.  If IP
				   is already above us, we need to send down the bind that we
				   would normally do when IP opens us.  This allows us to restart
				   the X.25 stack without restarting TCP/IP. */
				if (ip2xinet_num_ip_opened != 0)
					ip2xinet_send_down_bind(WR(q));
				break;
			case DL_UNBIND_REQ:
				/* If we're in UNBIND_PEND and receive OK_ACK1 we go to UNBND. */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;
			case DL_DETACH_REQ:
				/* If we're in DETACH_PEND and receive OK_ACK1 we go to UNATT */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;
			default:
				freemsg(mp);
				break;
			}
			break;
		default:
			printk("ip2xinet_lrput: bad prim=0x%lx", (ulong) dp->dl_primitive);
			freemsg(mp);
			break;
		}
		break;
	case M_FLUSH:
		/* read side: flush here, then reflect the write half downward
		   by queueing it on our write queue */
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
		}
		if (mp->b_rptr[0] & FLUSHW) {
			mp->b_rptr[0] &= ~FLUSHR;
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(WR(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(WR(q), FLUSHDATA);
			qenable(WR(q));
			if (!putq(WR(q), mp)) {
				mp->b_band = 0;
				putq(WR(q), mp);
			}
		} else
			freemsg(mp);
		break;
	case M_HANGUP:
		/* send it to the guy that linked us up, what he does is his problem. */
		if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;
	case M_IOCACK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a success Rejoice :-) */
			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;
	case M_IOCNAK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a failure From looking at xinet code this is
			 * impossible, so ignore it */
			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;
	default:
		printk("ip2xinet_lrput: bad type=%d", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}
	spin_unlock(&ip2xinet_lock);
	return (0);
}
/*
 * ip2xinet_uwput - upper write-side put procedure.
 * @q: upper write queue
 * @mp: message from the stream head
 *
 * Handles M_FLUSH canonically for the upper stream and processes the
 * I_LINK/I_UNLINK ioctls that attach/detach the xinet stream below us,
 * driving the DLPI attach on I_LINK.  All other message types are
 * unexpected here and are discarded.  Runs under ip2xinet_lock; replies
 * are queued on the upper read queue rather than sent directly.
 */
STATIC streamscall int
ip2xinet_uwput(queue_t *q, mblk_t *mp)
{
	int i;

	spin_lock(&ip2xinet_lock);
	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			/* reflect upstream via the read queue's service routine */
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
		} else
			freemsg(mp);
		break;
	case M_IOCTL:
		/* Process at least the I_LINK, I_UNLINK */
		/* THINKME: Failure to correctly process I_LINK/I_UNLINK while returning
		   correctly a nack to stream head will leave us in a possibly totally
		   screwed up DLPI state from which we have to somehow recover.  The
		   possible problematic states are DL_UNBOUND, any DL_PENDING states.
		   Note: if we stay in UNATTACHED on I_LINK failure or in IDLE on I_UNLINK
		   failure we're ok as long as the private data structure stuff is
		   consistent with the state */
		{
			struct iocblk *iocp;
			mblk_t *nmp;
			dl_attach_req_t *attach;
			struct linkblk *lp;

			iocp = (struct iocblk *) mp->b_rptr;
#if 0
#ifdef DEBUG
			pkt_debug(X25DBIOCTL) KPRINTF("%s size %d\n", x25dbiocmsg(iocp->ioc_cmd),
						      x25dbmsgsize(mp));
#endif
#endif
			switch ((unsigned) iocp->ioc_cmd) {
			case I_LINK:
				iocp->ioc_error = 0;
				iocp->ioc_rval = 0;
				iocp->ioc_count = 0;
				lp = (struct linkblk *) mp->b_cont->b_rptr;
				/* Use only one xinet queue for all devices */
				ip2xinet_status.lowerq = lp->l_qbot;
				ip2xinet_status.index = lp->l_index;
				/* Only one read q to get data from xinet */
				ip2xinet_status.readq = RD(q);
				/* These are dummy ones to indicate the queues are being used */
				ip2xinet_status.lowerq->q_ptr = (char *) &ip2xinet_numopen;
				RD(ip2xinet_status.lowerq)->q_ptr = (char *) &ip2xinet_numopen;
				if ((nmp = allocb(sizeof(union DL_primitives), BPRI_LO)) == NULL) {
					/* cannot build the DL_ATTACH_REQ: nak the I_LINK */
					iocp->ioc_error = ENOSR;
					mp->b_datap->db_type = M_IOCNAK;
					if (!putq(RD(q), mp)) {
						mp->b_band = 0;
						putq(RD(q), mp);
					}
					spin_unlock(&ip2xinet_lock);
					printk("pktioctl: I_LINK failed: allocb failed");
					return (0);
				}
				/* Setup and send an ATTACH */
				nmp->b_datap->db_type = M_PROTO;
				nmp->b_wptr += DL_ATTACH_REQ_SIZE;
				attach = (dl_attach_req_t *) nmp->b_rptr;
				attach->dl_primitive = DL_ATTACH_REQ;
				attach->dl_ppa = ip2xinet_status.myminor;
				ip2xinet_status.ip2x_dlstate = DL_ATTACH_PENDING;
				/* experience shows that an I_LINKed queue needs to be enabled so
				   that the service routine will be run. */
				qenable(ip2xinet_status.lowerq);
				if (!putq(ip2xinet_status.lowerq, nmp)) {
					nmp->b_band = 0;
					putq(ip2xinet_status.lowerq, nmp);
				}
				/* all went well */
				mp->b_datap->db_type = M_IOCACK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			case I_UNLINK:
			{
				struct linkblk *lp;

				iocp->ioc_error = 0;
				iocp->ioc_rval = 0;
				iocp->ioc_count = 0;
				lp = (struct linkblk *) mp->b_cont->b_rptr;
				/* Ignore the DLPI state, the stack is being torn down
				   regardless. */
				ip2xinet_status.ip2x_dlstate = UNLINKED;
				/* can't transmit any more */
				for (i = 0; i < NUMIP2XINET; i++) {
					struct ip2xinet_priv *privptr = &ip2xinet_devs[i].priv;

					if (privptr->state == 1)
						netif_stop_queue(&(ip2xinet_devs[i].dev));
				}
				flushq(q, FLUSHALL);
				flushq(RD(lp->l_qbot), FLUSHALL);
				ip2xinet_status.readq = NULL;
				ip2xinet_status.lowerq = NULL;
				mp->b_datap->db_type = M_IOCACK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			}
			default:
				/* any other ioctl is unsupported here */
				iocp->ioc_error = EINVAL;
				mp->b_datap->db_type = M_IOCNAK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			}
		}
		break;
	case M_DATA:
	case M_PCPROTO:
	case M_PROTO:
	default:
		printk("ip2xinet_uwput: unexpected type=0x%x", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}
	spin_unlock(&ip2xinet_lock);
	return (0);
}
/*
 * m3ua_wput - write-side put procedure.
 *
 * Queues normal messages behind anything already pending, dispatches data,
 * proto and ioctl messages to their handlers, and performs canonical
 * M_FLUSH handling.  On handler errors, EAGAIN requeues normal messages
 * and EOPNOTSUPP passes the message along when possible; anything else is
 * freed.
 *
 * Fixes:
 * - flushband() was called on `RD(1)` instead of `RD(q)`.
 * - the error-switch case was misspelled `EOPNTOSUPP`, so the intended
 *   pass-along for unsupported messages never matched (err is initialized
 *   to EOPNOTSUPP).
 */
static void
m3ua_wput(queue_t *q, mblk_t *mp)
{
	int err = EOPNOTSUPP;

	trace();
	/* preserve ordering: normal messages go behind queued work */
	if (q->q_count && mp->b_datap->db_type < QPCTL) {
		putq(q, mp);
		return;
	}
	switch (mp->b_datap->db_type) {
	case M_DATA:
		if ((err = m3ua_m_proto(q, mp)))
			break;
		return;
	case M_CTL:
	case M_PROTO:
	case M_PCPROTO:
		if ((err = m3ua_m_proto(q, mp)))
			break;
		return;
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			if (q->q_next) {
				putnext(q, mp);
				return;
			}
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			qreply(q, mp);
		} else
			break;
		return;
	case M_IOCTL:
		if ((err = ls_m_ioctl(q, mp)))
			break;
		return;
	}
	switch (err) {
	case EAGAIN:
		if (mp->b_datap->db_type < QPCTL) {
			putq(q, mp);
			return;
		}
		break;
	case EOPNOTSUPP:
		if (q->q_next) {
			putnext(q, mp);
			return;
		}
		/* fall through: no downstream queue, discard */
	}
	trace();
	freemsg(mp);
	return;
}
/*
 * ptemwput - Module write queue put procedure.
 *
 * This is called from the module or stream head upstream.
 *
 * Priority messages (M_IOCDATA window-size copy responses, M_FLUSH,
 * M_READ, M_STOP/M_START flow control) are handled immediately; normal
 * messages are either processed by ptemwmsg() on the fast path or queued
 * when the queue is non-empty, flow-controlled, or output is stopped.
 *
 * XXX: This routine is quite lazy about handling allocation failures,
 * basically just giving up and reporting failure.  It really ought to
 * set up bufcalls and only fail when it's absolutely necessary.
 */
static void
ptemwput(queue_t *q, mblk_t *mp)
{
	struct ptem *ntp = (struct ptem *) q->q_ptr;
	struct iocblk *iocp;		/* outgoing ioctl structure */
	struct copyresp *resp;
	unsigned char type = mp->b_datap->db_type;

	if (type >= QPCTL) {
		switch (type) {
		case M_IOCDATA:
			resp = (struct copyresp *) mp->b_rptr;
			if (resp->cp_rval) {
				/*
				 * Just free message on failure.
				 */
				freemsg(mp);
				break;
			}
			/*
			 * Only need to copy data for the SET case.
			 */
			switch (resp->cp_cmd) {
			case TIOCSWINSZ:
				ptioc(q, mp, WRSIDE);
				break;
			case JWINSIZE:
			case TIOCGWINSZ:
				/* GET copyout completed: acknowledge with no data */
				mioc2ack(mp, NULL, 0, 0);
				qreply(q, mp);
				break;
			default:
				freemsg(mp);
			}
			break;
		case M_FLUSH:
			/* write-side flush; band flushing only for real ttys */
			if (*mp->b_rptr & FLUSHW) {
				if ((ntp->state & IS_PTSTTY) && (*mp->b_rptr & FLUSHBAND))
					flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
				else
					flushq(q, FLUSHDATA);
			}
			putnext(q, mp);
			break;
		case M_READ:
			freemsg(mp);
			break;
		case M_STOP:
			/*
			 * Set the output flow control state.
			 */
			ntp->state |= OFLOW_CTL;
			putnext(q, mp);
			break;
		case M_START:
			/*
			 * Relieve the output flow control state.
			 */
			ntp->state &= ~OFLOW_CTL;
			putnext(q, mp);
			qenable(q);
			break;
		default:
			putnext(q, mp);
			break;
		}
		return;
	}
	/*
	 * If our queue is nonempty or flow control persists
	 * downstream or module in stopped state, queue this message.
	 */
	if (q->q_first != NULL || !bcanputnext(q, mp->b_band)) {
		/*
		 * Exception: ioctls, except for those defined to
		 * take effect after output has drained, should be
		 * processed immediately.
		 */
		switch (type) {
		case M_IOCTL:
			iocp = (struct iocblk *) mp->b_rptr;
			switch (iocp->ioc_cmd) {
				/*
				 * Queue these.
				 */
			case TCSETSW:
			case TCSETSF:
			case TCSETAW:
			case TCSETAF:
			case TCSBRK:
				break;
				/*
				 * Handle all others immediately.
				 */
			default:
				(void) ptemwmsg(q, mp);
				return;
			}
			break;
		case M_DELAY:
			/* tty delays not supported */
			freemsg(mp);
			return;
		case M_DATA:
			if ((mp->b_wptr - mp->b_rptr) < 0) {
				/*
				 * Free all bad length messages.
				 */
				freemsg(mp);
				return;
			} else if ((mp->b_wptr - mp->b_rptr) == 0) {
				/* zero-length data only meaningful for real ttys */
				if (!(ntp->state & IS_PTSTTY)) {
					freemsg(mp);
					return;
				}
			}
		}
		(void) putq(q, mp);
		return;
	}
	/*
	 * fast path into ptemwmsg to dispose of mp.
	 */
	if (!ptemwmsg(q, mp))
		(void) putq(q, mp);
}
/*
 * spx_wput - write-side put procedure.
 *
 * Handles canonical M_FLUSH, and on the first M_PROTO attempts to
 * interpret the message as a peer queue pointer used to weld this stream
 * to its mate; all other messages are passed along (or turned around at
 * the end of the stream).
 *
 * Fixes:
 * - the peer queue pointer was "read" with bcopy(mp->b_rptr, oq,
 *   sizeof(*oq)), which copies sizeof(queue_t) bytes through a NULL
 *   pointer; copy the pointer value into &oq instead.
 * - after a successful weldq() the request message was neither freed nor
 *   forwarded (a leak); free it.
 * - dead `#if 0 nak:` block removed.
 */
static streamscall int
spx_wput(queue_t *q, mblk_t *mp)
{
	struct spx *p = q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			if (q->q_next) {
				putnext(q, mp);
				break;
			}
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (q->q_next) {
				putnext(q, mp);
				break;
			}
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			qreply(q, mp);
			break;
		}
		freemsg(mp);
		break;
	case M_PROTO:
		/* We go to some trouble here to make sure that we do not intercept
		   M_PROTO messages that are not for us.  This is because we want the
		   stream to support passing of M_PROTO and M_PCPROTO messages as well,
		   regardless of whether it is just a loop-back device or whether it is
		   an unnamed pipe. */
		if (p->init == 0 && mp->b_wptr >= mp->b_rptr + sizeof(long)) {
			queue_t *oq = NULL;
			struct spx *x;

			/* not necessarily aligned: copy the candidate peer queue
			   pointer out of the message */
			bcopy(mp->b_rptr, &oq, sizeof(oq));
			/* validate against list */
			spin_lock(&spx_lock);
			for (x = spx_list; x && x->q != oq; x = x->next) ;
			if (x && x->q == oq) {
				weldq(WR(q), oq, WR(oq), q, NULL, NULL, NULL);
				spin_unlock(&spx_lock);
				/* FIXME: welding is probably not enough.  We probably have to
				   link the two stream heads together, pipe-style as well as
				   setting some stream head characteristics.  People would be
				   better to use the pipe(4) device anyway. */
				freemsg(mp);	/* request consumed; do not leak it */
				break;
			}
			spin_unlock(&spx_lock);
		}
		/* fall through: not a weld request, treat like any message */
	default:
		if (q->q_next)
			putnext(q, mp);
		else
			qreply(q, mp);
		break;
	}
	if (p->init == 0)
		p->init = 1;
	return (0);
}
/**
 * ptem_w_msg - process a message on the write side
 * @q: write queue
 * @mp: message to process
 *
 * Returns 1 when the caller (putp or srvp) needs to queue or requeue the
 * message.  Returns 0 when the message has been disposed and the caller must
 * release its reference to mp.
 *
 * Keep this function out of the way of the fastpath.
 *
 * FIX: in the TCGETA case the control characters were copied onto
 * themselves — bcopy(p->c.c_cc, p->c.c_cc, NCC) — leaving the user's
 * termio c_cc array uninitialized; copy into c->c_cc.
 */
static streams_noinline int
ptem_w_msg(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		if ((p->flags & PTEM_OUTPUT_STOPPED) || (q->q_first != NULL)
		    || (q->q_flag & QSVCBUSY) || (!bcanputnext(q, mp->b_band)))
			return (1);
		putnext(q, mp);
		return (0);
	}
	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;
		int error = EINVAL;
		int rval = 0;
		int count = 0;
		mblk_t *bp, *cp;

		/* The Stream head is set to recognized all transparent terminal
		   input-output controls and pass them downstream as though they were
		   I_STR input-output controls.  There is also the opportunity to
		   register input-output controls with the Stream head using the
		   TIOC_REPLY message. */
		if (ioc->ioc_count == TRANSPARENT) {
			__swerr();
			goto nak;
		}
		if ((bp = mp->b_cont) == NULL)
			goto nak;
		switch (ioc->ioc_cmd) {
		case TCSETAF:
			/* Note, if properly handled the M_FLUSH message will never be queued
			   and upon successful return from this function, we have already
			   processed the read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETAW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETA:
		{
			struct termio *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termio)))
				goto nak;
			c = (typeof(c)) bp->b_rptr;
			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}
				/* termio only carries the low 16 bits of each flag word */
				p->c.c_iflag = (p->c.c_iflag & 0xffff0000) | c->c_iflag;
				p->c.c_oflag = (p->c.c_oflag & 0xffff0000) | c->c_oflag;
				p->c.c_cflag = (p->c.c_cflag & 0xffff0000) | c->c_cflag;
				p->c.c_lflag = (p->c.c_lflag & 0xffff0000) | c->c_lflag;
				p->c.c_line = c->c_line;
				bcopy(c->c_cc, p->c.c_cc, NCC);
				putnext(q, cp);
			}
			goto ack;
		}
		case TCSETSF:
			/* Note, if properly handled the M_FLUSH message will never be queued
			   and upon successful return from this function, we have already
			   processed the read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETSW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETS:
		{
			struct termios *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termios)))
				goto nak;
			c = (typeof(c)) bp->b_rptr;
			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}
				p->c = *c;
				putnext(q, cp);
			}
			goto ack;
		}
		case TCGETA:
		{
			struct termio *c;

			extern void __struct_termio_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termio))
				__struct_termio_is_too_large_for_fastbuf();
			count = sizeof(*c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			c = (typeof(c)) bp->b_rptr;
			c->c_iflag = p->c.c_iflag;
			c->c_oflag = p->c.c_oflag;
			c->c_cflag = p->c.c_cflag;
			c->c_lflag = p->c.c_lflag;
			c->c_line = p->c.c_line;
			/* FIX: was bcopy(p->c.c_cc, p->c.c_cc, NCC) — a self-copy
			   that left the returned c_cc array unset */
			bcopy(p->c.c_cc, c->c_cc, NCC);
			goto ack;
		}
		case TCGETS:
		{
			extern void __struct_termios_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termios))
				__struct_termios_is_too_large_for_fastbuf();
			count = sizeof(p->c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct termios *) bp->b_rptr) = p->c;
			goto ack;
		}
		case TIOCGWINSZ:
		{
			extern void __struct_winsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct winsize))
				__struct_winsize_is_too_large_for_fastbuf();
			count = sizeof(p->ws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct winsize *) bp->b_rptr) = p->ws;
			goto ack;
		}
#ifdef JWINSIZE
		case JWINSIZE:
		{
			struct jwinsize *jws;

			extern void __struct_jwinsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct jwinsize))
				__struct_jwinsize_is_too_large_for_fastbuf();
			/* always have room in a fastbuf */
			count = sizeof(*jws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			jws = (typeof(jws)) bp->b_rptr;
			jws->bytesx = p->ws.ws_col;
			jws->bytesy = p->ws.ws_row;
			jws->bitsx = p->ws.ws_xpixel;
			jws->bitsy = p->ws.ws_ypixel;
			goto ack;
		}
#endif				/* JWINSIZE */
		case TIOCSWINSZ:
		{
			struct winsize *ws;
			int changed = 0;
			int zeroed = !(p->flags & PTEM_HAVE_WINSIZE);
			mblk_t *mb;

			if (!pullupmsg(bp, sizeof(*ws)))
				goto nak;
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			if (!(mb = allocb(1, BPRI_MED))) {
				freemsg(cp);
				error = EAGAIN;
				goto nak;
			}
			ws = (typeof(ws)) bp->b_rptr;
			if (ws->ws_col != p->ws.ws_col) {
				if ((p->ws.ws_col = ws->ws_col))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_row != p->ws.ws_row) {
				if ((p->ws.ws_row = ws->ws_row))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_xpixel != p->ws.ws_xpixel) {
				if ((p->ws.ws_xpixel = ws->ws_xpixel))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_ypixel != p->ws.ws_ypixel) {
				if ((p->ws.ws_ypixel = ws->ws_ypixel))
					zeroed = 0;
				changed = 1;
			}
			if (zeroed)
				p->flags &= ~PTEM_HAVE_WINSIZE;
			else
				p->flags |= PTEM_HAVE_WINSIZE;
			if (changed) {
				/* notify the slave side with SIGWINCH */
				mb->b_datap->db_type = M_SIG;
				*mb->b_wptr++ = SIGWINCH;
				qreply(q, mb);
			} else
				freeb(mb);
			putnext(q, cp);	/* copy for pctk(4) */
			count = 0;
			goto ack;
		}
		case TCSBRK:
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			putnext(q, cp);
			count = 0;
			goto ack;
		default:
			goto nak;
		}
		break;
	      ack:
		mp->b_datap->db_type = M_IOCACK;
		ioc->ioc_error = 0;
		ioc->ioc_rval = rval;
		ioc->ioc_count = count;
		goto reply;
	      nak:
		mp->b_datap->db_type = M_IOCNAK;
		ioc->ioc_error = error;
		ioc->ioc_rval = -1;
		ioc->ioc_count = 0;
		goto reply;
	      reply:
		qreply(q, mp);
		break;
	}
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
		}
		putnext(q, mp);
		break;
	default:
		if (mp->b_datap->db_type < QPCTL) {
			if ((q->q_first != NULL) || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band)))
				return (1);	/* (re)queue */
		}
		putnext(q, mp);
		break;
	}
	return (0);
}
/*
 * sad_put - put procedure for the SAD (STREAMS administrative driver).
 * @q: write queue
 * @mp: message (M_FLUSH, M_IOCTL or M_IOCDATA)
 *
 * Implements the SAD_SAP/SAD_GAP/SAD_LAP/SAD_VML input-output controls in
 * both I_STR and transparent forms.  Transparent ioctls are driven through
 * a small per-stream state machine (sad->iocstate: 1 = structure copied
 * in, 2 = result copied out / module list copied in) using M_COPYIN and
 * M_COPYOUT exchanges; I_STR ioctls jump straight into state 1 via the
 * sad_*_state1 labels.  Replies always go out with qreply().
 */
static streamscall int
sad_put(queue_t *q, mblk_t *mp)
{
	struct sad *sad = q->q_ptr;
	union ioctypes *ioc;
	int err = 0, rval = 0, count = 0;
	mblk_t *dp = mp->b_cont;
	caddr_t sa_addr, sl_addr;
	size_t sa_size, sl_size;

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		/* canonical flush handling for a driver */
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			queue_t *rq = RD(q);

			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(rq, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(rq, FLUSHDATA);
			qreply(q, mp);
			return (0);
		}
		break;		/* falls to abort: frees the message */
	case M_IOCTL:
		ioc = (typeof(ioc)) mp->b_rptr;
#ifdef WITH_32BIT_CONVERSION
		if (ioc->iocblk.ioc_flag == IOC_ILP32) {
			/* XXX: following pointer conversion does not work on all
			   architectures. */
			sa_addr =
			    (caddr_t) (unsigned long) (uint32_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush32);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_addr = (caddr_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->iocblk.ioc_cmd) {
		case SAD_SAP:
			/* set autopush: privileged */
			err = -EPERM;
#ifdef HAVE_KMEMB_STRUCT_CRED_UID_VAL
			if (ioc->iocblk.ioc_uid.val != 0)
				goto nak;
#else
			if (ioc->iocblk.ioc_uid != 0)
				goto nak;
#endif
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				/* transparent: copy the strapush structure in first */
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_sap_state1;
		case SAD_GAP:
			/* get autopush */
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_gap_state1;
		case SAD_LAP:
			/* list autopush */
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_lap_state1;
		case SAD_VML:
			/* validate module list */
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sl_addr;
				ioc->copyreq.cq_size = sl_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_vml_state1;
		}
		err = -EINVAL;
		goto nak;
	case M_IOCDATA:
		ioc = (typeof(ioc)) mp->b_rptr;
		if (ioc->copyresp.cp_rval != (caddr_t) 0) {
			/* previous copyin/copyout failed: abandon the ioctl */
			sad->transparent = 0;
			sad->iocstate = 0;
			goto abort;
		}
#ifdef WITH_32BIT_CONVERSION
		if (ioc->copyresp.cp_flag == IOC_ILP32) {
			sa_size = sizeof(struct strapush32);
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_size = sizeof(struct strapush);
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->copyresp.cp_cmd) {
		case SAD_SAP:
			switch (sad->iocstate) {
			case 1:
			      sad_sap_state1:
				/* strapush structure is in dp: perform the set */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_set(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap = (typeof(sap)) dp->b_rptr;

					if ((err = apush_set(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					/* copy the (possibly updated) structure back out */
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_GAP:
			switch (sad->iocstate) {
			case 1:
			      sad_gap_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_get(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_get(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_LAP:
			switch (sad->iocstate) {
			case 1:
			      sad_lap_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_lst(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_lst(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_VML:
			switch (sad->iocstate) {
			case 1:
			      sad_vml_state1:
				/* str_list header is in: validate count and copy in
				   the module name array next */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sl_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct str_list32 *slp32 = (typeof(slp32)) dp->b_rptr;

					sad->sl.sl_nmods = slp32->sl_nmods;
					sad->sl.sl_modlist =
					    (struct str_mlist *) (unsigned long) slp32->sl_modlist;
				} else
#endif
				{
					struct str_list *slp = (typeof(slp)) dp->b_rptr;

					sad->sl.sl_nmods = slp->sl_nmods;
					sad->sl.sl_modlist = slp->sl_modlist;
				}
				err = -EINVAL;
				if (1 > sad->sl.sl_nmods || sad->sl.sl_nmods > MAXAPUSH)
					goto nak;
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = (caddr_t) sad->sl.sl_modlist;
				ioc->copyreq.cq_size =
				    sad->sl.sl_nmods * sizeof(struct str_mlist);
				ioc->copyreq.cq_flag = 0;
				sad->iocstate = 2;
				qreply(q, mp);
				return (0);
			case 2:
				/* module name array is in: validate it */
				err = -EFAULT;
				if (!dp
				    || dp->b_wptr <
				    dp->b_rptr + sad->sl.sl_nmods * sizeof(struct str_mlist))
					goto nak;
				sad->sl.sl_modlist = (struct str_mlist *) dp->b_rptr;
				if ((err = apush_vml(&sad->sl)) < 0)
					goto nak;
				rval = err;	/* number of valid modules */
				goto ack;
			}
			err = -EIO;
			goto nak;
		}
	}
      abort:
	freemsg(mp);
	return (0);
      nak:
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCNAK;
	ioc->iocblk.ioc_count = 0;
	ioc->iocblk.ioc_rval = -1;
	ioc->iocblk.ioc_error = -err;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
      ack:
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCACK;
	ioc->iocblk.ioc_count = count;
	ioc->iocblk.ioc_rval = rval;
	ioc->iocblk.ioc_error = 0;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
}