/*
 * dm2s_prep_scatgath - Build the scatter/gather list describing a streams
 * message so it can be handed to the mailbox layer for transmission.
 *
 * One element is filled in per mblk in the chain, up to maxsg entries.
 * If the chain is longer than maxsg, the whole message is pulled up into
 * a single mblk and described with one element instead.
 *
 * Returns 0 on success with *numsg set to the number of elements used,
 * or EAGAIN if the pullup fails.
 */
static int
dm2s_prep_scatgath(mblk_t *mp, uint32_t *numsg, mscat_gath_t *sgp, int maxsg)
{
	uint32_t filled;
	mblk_t *bp;

	for (bp = mp, filled = 0; bp != NULL && filled < maxsg;
	    bp = bp->b_cont, filled++) {
		sgp[filled].msc_dptr = (caddr_t)bp->b_rptr;
		sgp[filled].msc_len = MBLKL(bp);
	}

	if (bp != NULL) {
		/*
		 * Ran out of scatter/gather slots before running out of
		 * mblks; collapse the message into one contiguous block
		 * and describe it with a single element.
		 */
		if (pullupmsg(mp, -1) != 1)
			return (EAGAIN);
		sgp[0].msc_dptr = (caddr_t)mp->b_rptr;
		sgp[0].msc_len = MBLKL(mp);
		filled = 1;
	}

	*numsg = filled;
	return (0);
}
/*
 * parse_packet6(packet, mp)
 *
 * Fill in an ipgpc_packet_t structure from an IPv6 message block:
 * addresses, next header, traffic class, ids and total length.  The
 * message is pulled up at the end so that get_port_info() can locate
 * the transport header contiguously.
 */
void
parse_packet6(ipgpc_packet_t *packet, mblk_t *mp)
{
	ip6_t *hdr = (ip6_t *)mp->b_rptr;

	/* Copy source and destination addresses out of the IPv6 header. */
	bcopy(hdr->ip6_src.s6_addr32, packet->saddr.s6_addr32,
	    sizeof (hdr->ip6_src.s6_addr32));
	bcopy(hdr->ip6_dst.s6_addr32, packet->daddr.s6_addr32,
	    sizeof (hdr->ip6_dst.s6_addr32));

	/* Will be (re-)assigned in get_port_info */
	packet->proto = hdr->ip6_nxt;
	packet->dsfield = __IPV6_TCLASS_FROM_FLOW(hdr->ip6_vcf);
	find_ids(packet, mp);
	packet->len = msgdsize(mp);
	packet->sport = 0;
	packet->dport = 0;

	/* Need to pull everything up so the ports are contiguous. */
	if (mp->b_cont != NULL) {
		if (pullupmsg(mp, -1) == 0) {
			ipgpc0dbg(("parse_packet6(): pullup error, can't "
			    "find ports"));
			return;
		}
		/* pullupmsg may have moved the data; re-read the header. */
		hdr = (ip6_t *)mp->b_rptr;
	}
	get_port_info(packet, hdr, AF_INET6, mp);
}
/*
 * xdrmblk_getint32 - decode one 32-bit big-endian integer from the mblk
 * chain backing this XDR stream.
 *
 * x_base tracks the current mblk; x_handy tracks the bytes remaining in
 * it.  Returns TRUE on success (advancing b_rptr/x_handy), FALSE when
 * the stream is exhausted or the data cannot be pulled up.
 */
static bool_t
xdrmblk_getint32(XDR *xdrs, int32_t *int32p)
{
	mblk_t *m;

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	if (m == NULL)
		return (FALSE);
	/*
	 * If the pointer is not aligned or there is not
	 * enough bytes, pullupmsg to get enough bytes and
	 * align the mblk.
	 */
	if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)) ||
	    xdrs->x_handy < sizeof (int32_t)) {
		while (!pullupmsg(m, sizeof (int32_t))) {
			/*
			 * Could have failed due to not
			 * enough data or an allocb failure.
			 * If there genuinely aren't 4 bytes left in the
			 * whole chain, give up; otherwise the failure was
			 * allocb, so sleep a tick and retry.
			 * NOTE(review): this retries indefinitely on
			 * persistent allocation failure — confirm callers
			 * tolerate the blocking delay().
			 */
			if (xmsgsize(m) < sizeof (int32_t))
				return (FALSE);
			delay(hz);
		}
		/* Pullup rewrote the block; recompute remaining bytes. */
		xdrs->x_handy = (int)(m->b_wptr - m->b_rptr);
	}

	/* LINTED pointer alignment */
	*int32p = ntohl(*((int32_t *)(m->b_rptr)));
	m->b_rptr += sizeof (int32_t);

	/*
	 * Instead of leaving handy as 0 causing more pullupmsg's
	 * simply move to the next mblk.
	 */
	if ((xdrs->x_handy -= sizeof (int32_t)) == 0) {
		m = m->b_cont;
		xdrs->x_base = (caddr_t)m;
		if (m != NULL)
			xdrs->x_handy = (int)(m->b_wptr - m->b_rptr);
	}
	return (TRUE);
}
/*
 * xdrmblk_getint32 - decode one 32-bit big-endian integer from the mblk
 * chain backing this XDR stream (variant that also maintains absolute and
 * relative position counters in the stream's xdrmblk_params).
 *
 * Returns TRUE on success, FALSE when no data remains or the bytes
 * cannot be pulled up into one aligned run.
 */
static bool_t
xdrmblk_getint32(XDR *xdrs, int32_t *int32p)
{
	mblk_t *m;
	struct xdrmblk_params *p;

	/* Step past any mblks already fully consumed. */
	xdrmblk_skip_fully_read_mblks(xdrs);

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	if (m == NULL)
		return (FALSE);

	p = (struct xdrmblk_params *)xdrs->x_private;

	/*
	 * If the pointer is not aligned or there is not
	 * enough bytes, pullupmsg to get enough bytes and
	 * align the mblk.
	 */
	if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)) ||
	    xdrs->x_handy < sizeof (int32_t)) {
		while (!pullupmsg(m, sizeof (int32_t))) {
			/*
			 * Could have failed due to not
			 * enough data or an allocb failure.
			 * NOTE(review): retries forever on persistent
			 * allocb failure — confirm this blocking
			 * delay() is acceptable to callers.
			 */
			if (xmsgsize(m) < sizeof (int32_t))
				return (FALSE);
			delay(hz);
		}
		/*
		 * Pullup collapsed the consumed prefix, so the relative
		 * offset folds into the absolute position and restarts.
		 */
		p->apos += p->rpos;
		p->rpos = 0;
		xdrs->x_handy = (int)MBLKL(m);
	}

	/* LINTED pointer alignment */
	*int32p = ntohl(*((int32_t *)(m->b_rptr)));
	m->b_rptr += sizeof (int32_t);
	xdrs->x_handy -= sizeof (int32_t);
	p->rpos += sizeof (int32_t);
	return (TRUE);
}
/*
 * ip_isvalidchecksum - verify the IPv4 header checksum of the packet in mp.
 *
 * Returns 1 when the checksum is valid (or the NIC already verified a
 * full hardware checksum), 0 when it is invalid or the message is too
 * short to hold the advertised header.
 */
/*ARGSUSED*/
static int
ip_isvalidchecksum(net_handle_t neti, mblk_t *mp)
{
	unsigned char *wptr;
	ipha_t *ipha;
	int hlen;
	int ret;

	/*
	 * Assert before the first dereference of mp, not after; the
	 * original initialized ipha from mp->b_rptr ahead of this check.
	 */
	ASSERT(mp != NULL);

	ipha = (ipha_t *)mp->b_rptr;

	/* Trust a full, successful hardware checksum when reported. */
	if (dohwcksum && DB_CKSUM16(mp) != 0xFFFF &&
	    (DB_CKSUMFLAGS(mp) & HCK_FULLCKSUM) &&
	    (DB_CKSUMFLAGS(mp) & HCK_FULLCKSUM_OK) &&
	    (DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM))
		return (1);

	/* Header length in bytes, from the 4-bit IHL field (in words). */
	hlen = (ipha->ipha_version_and_hdr_length & 0x0F) << 2;

	/*
	 * Check that the mblk being passed in has enough data in it
	 * before blindly checking ip_cksum.
	 */
	if (msgdsize(mp) < hlen)
		return (0);

	if (mp->b_wptr < mp->b_rptr + hlen) {
		if (pullupmsg(mp, hlen) == 0)
			return (0);
		/*
		 * pullupmsg() may have copied the data into a freshly
		 * allocated buffer; refresh the header pointer so the
		 * checksum is not read through a stale one.
		 */
		ipha = (ipha_t *)mp->b_rptr;
		wptr = mp->b_wptr;
	} else {
		/* Temporarily clip the block to just the header. */
		wptr = mp->b_wptr;
		mp->b_wptr = mp->b_rptr + hlen;
	}

	if (ipha->ipha_hdr_checksum == ip_cksum(mp, 0, ipha->ipha_hdr_checksum))
		ret = 1;
	else
		ret = 0;
	/* Restore the saved write pointer before returning. */
	mp->b_wptr = wptr;

	return (ret);
}
/*
 * SMCG_send() -- send a packet
 *
 * Maps each mblk of the message for DMA, builds the fragment list and
 * hands it to the LM layer.  Returns GLD_SUCCESS, GLD_NORESOURCES (caller
 * should retry later), GLD_BADARG for oversize packets, or GLD_FAILURE.
 */
static int
SMCG_send(gld_mac_info_t *macinfo, mblk_t *mp)
{
	smcg_t *smcg = (smcg_t *)macinfo->gldm_private;
	Adapter_Struc *pAd = smcg->smcg_pAd;
	int i = 0, j = 0, totlen = 0, msglen = 0, rc;
	mblk_t *mptr = mp;
	Data_Buff_Structure dbuf;
	ddi_dma_cookie_t cookie;
	unsigned int ncookies;

	/*
	 * Measure the message.  If the chain has more mblks than we have
	 * DMA handles per slot, collapse it into one mblk first.
	 */
	for (; mptr != NULL; i++, mptr = mptr->b_cont) {
		if (i >= SMCG_MAX_TX_MBLKS) {
			if (pullupmsg(mp, -1) == 0) {
				/* Allocation failed; ask GLD to retry. */
				smcg->smcg_need_gld_sched = 1;
				return (GLD_NORESOURCES);	/* retry send */
			}
			msglen = (mp->b_wptr - mp->b_rptr);
			break;
		}
		msglen += (mptr->b_wptr - mptr->b_rptr);
	}

	if (msglen > ETHERMAX) {
		cmn_err(CE_WARN, SMCG_NAME "%d: dropping oversize packet (%d)",
		    macinfo->gldm_ppa, msglen);
		return (GLD_BADARG);
	}

	mutex_enter(&smcg->txbuf_lock);
	/* Reclaim completed transmits before checking for a free slot. */
	mutex_enter(&smcg->lm_lock);
	LM_Reap_Xmits(pAd);
	mutex_exit(&smcg->lm_lock);

	/* Ring full (head would catch the tail) -> retry later. */
	if ((smcg->tx_ring_head + 1) % pAd->num_of_tx_buffs ==
	    smcg->tx_ring_tail) {
		smcg->smcg_need_gld_sched = 1;
		mutex_exit(&smcg->txbuf_lock);
		return (GLD_NORESOURCES);	/* retry send */
	}

	/*
	 * Bind each non-empty mblk for DMA and record every cookie as a
	 * fragment.  i counts bound handles, j counts fragments.
	 * NOTE(review): j is not bounded against the size of
	 * dbuf.fragment_list here — presumably the cookie limits on the
	 * DMA handles guarantee it fits; confirm against the handle
	 * attributes set at attach time.
	 */
	for (mptr = mp, i = 0; mptr != NULL; mptr = mptr->b_cont) {
		int blocklen = mptr->b_wptr - mptr->b_rptr;

		if (blocklen == 0)
			continue;

		ASSERT(i < SMCG_MAX_TX_MBLKS);
		rc = ddi_dma_addr_bind_handle(
		    smcg->tx_info[smcg->tx_ring_head].dmahandle[i], NULL,
		    (caddr_t)mptr->b_rptr, (size_t)blocklen, DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, 0, &cookie, &ncookies);

		if (rc != DDI_DMA_MAPPED) {
			/* Unwind the bindings made so far. */
			while (--i >= 0)
				(void) ddi_dma_unbind_handle(
				    smcg->tx_info[smcg->tx_ring_head].
				    dmahandle[i]);
			if (rc == DDI_DMA_NORESOURCES) {
				smcg->smcg_need_gld_sched = 1;
				mutex_exit(&smcg->txbuf_lock);
				return (GLD_NORESOURCES);
			}
#ifdef DEBUG
			if (SMCG_debug & SMCGTRACE)
				cmn_err(CE_WARN, SMCG_NAME
				    "Send bind handle failure = 0x%x", rc);
#endif
			mutex_exit(&smcg->txbuf_lock);
			return (GLD_FAILURE);
		}

		/* Walk every cookie of this binding into the fragment list. */
		/* CONSTANTCONDITION */
		while (1) {
			dbuf.fragment_list[j].fragment_length =
			    cookie.dmac_size | PHYSICAL_ADDR;
			dbuf.fragment_list[j].fragment_ptr =
			    (unsigned char *)(uintptr_t)cookie.dmac_address;
			j++;
			if (--ncookies == 0)
				break;
			ddi_dma_nextcookie(
			    smcg->tx_info[smcg->tx_ring_head].dmahandle[i],
			    &cookie);
		}
		i++;
		totlen += blocklen;
	}
	dbuf.fragment_count = j;

	/* Remember what we bound so completion/teardown can release it. */
	smcg->tx_info[smcg->tx_ring_head].handles_bound = i;
	smcg->tx_info[smcg->tx_ring_head].mptr = mp;

	if (totlen < ETHERMIN)
		totlen = ETHERMIN;	/* pad if necessary */

	mutex_enter(&smcg->lm_lock);
	pAd->xmit_interrupts = (smcg->smcg_need_gld_sched) ? 1 : 0;
	rc = LM_Send(&dbuf, pAd, totlen);
	mutex_exit(&smcg->lm_lock);

	if (rc != SUCCESS) {
		/* Send failed: release the DMA bindings for this slot. */
		for (i = 0;
		    i < smcg->tx_info[smcg->tx_ring_head].handles_bound; i++)
			(void) ddi_dma_unbind_handle(
			    smcg->tx_info[smcg->tx_ring_head].dmahandle[i]);
	} else
		smcg->tx_ring_head =
		    (smcg->tx_ring_head+1) % pAd->num_of_tx_buffs;

	mutex_exit(&smcg->txbuf_lock);

#ifdef DEBUG
	if (rc != SUCCESS && rc != OUT_OF_RESOURCES)
		cmn_err(CE_WARN, SMCG_NAME "_send: LM_Send failed %d", rc);
#endif

	if (rc == SUCCESS) {
		return (GLD_SUCCESS);
	} else if (rc == OUT_OF_RESOURCES) {
		smcg->smcg_need_gld_sched = 1;
		return (GLD_NORESOURCES);
	} else {
		return (GLD_FAILURE);
	}
}
/**
 * ptem_r_msg - process a message on the read side
 * @q: read queue
 * @mp: the message to process
 *
 * Handles M_IOCTL messages arriving on the read side, replying with
 * M_IOCACK or M_IOCNAK via qreply(); any other message is simply passed
 * upstream with putnext().  The message is always consumed.
 *
 * Keep this away from the fast path.
 */
static streams_noinline __unlikely void
ptem_r_msg(queue_t *q, mblk_t *mp)
{
	struct iocblk *ioc = (typeof(ioc)) mp->b_rptr;
	int error = EINVAL;
	int count = 0;
	int rval = 0;
	mblk_t *bp;

	/* The Stream head is set to recognize all transparent terminal
	   input-output controls and pass them downstream as though they were
	   I_STR input-output controls.  There is also the opportunity to
	   register input-output controls with the Stream head using the
	   TIOC_REPLY message. */
	if (unlikely(ioc->ioc_count == TRANSPARENT)) {
		__swerr();
		goto nak;
	}
	if (!(bp = mp->b_cont))
		goto nak;
	switch (ioc->ioc_cmd) {
	case TCSBRK:
		/* When the ptem module receives an M_IOCTL message of type
		   TCSBRK on its read-side queue, it sends an M_IOCACK message
		   downstream and an M_BREAK message upstream. */
		if (!pullupmsg(bp, sizeof(int)))
			goto nak;
		if (!putnextctl(q, M_BREAK)) {
			error = EAGAIN;
			goto nak;
		}
		goto ack;
	case TIOCGWINSZ:
	{
		/* Return the window size cached for the TIOCSWINSZ,
		   TIOCGWINSZ and JWINSIZE input-output control commands. */
		struct ptem *p = PTEM_PRIV(q);
		extern void __struct_winsize_is_too_large_for_fastbuf(void);

		if (!(p->flags & PTEM_HAVE_WINSIZE))
			goto nak;
		if (FASTBUF < sizeof(struct winsize)) {
			__struct_winsize_is_too_large_for_fastbuf();
		}
		/* always have room in a fastbuf */
		count = sizeof(p->ws);
		bp->b_rptr = bp->b_datap->db_base;
		bp->b_wptr = bp->b_rptr + count;
		*(struct winsize *) bp->b_rptr = p->ws;
		goto ack;
	}
#ifdef JWINSIZE
	case JWINSIZE:
		/* Return the cached window size in jwinsize format. */
	{
		struct ptem *p = PTEM_PRIV(q);
		struct jwinsize *jws;

		if (!(p->flags & PTEM_HAVE_WINSIZE))
			goto nak;
		if (FASTBUF < sizeof(struct jwinsize))
			__undefined_call_makes_compile_fail();
		/* always have room in a fastbuf */
		count = sizeof(*jws);
		bp->b_rptr = bp->b_datap->db_base;
		bp->b_wptr = bp->b_rptr + count;
		jws = (typeof(jws)) bp->b_rptr;
		jws->bytesx = p->ws.ws_col;
		jws->bytesy = p->ws.ws_row;
		jws->bitsx = p->ws.ws_xpixel;
		jws->bitsy = p->ws.ws_ypixel;
		goto ack;
	}
#endif				/* JWINSIZE */
	case TIOCSWINSZ:
		/* Update the cached window size; raise SIGWINCH upstream
		   when any dimension actually changed. */
	{
		struct ptem *p = PTEM_PRIV(q);
		struct winsize *ws;
		int changed = 0;
		int zeroed = !(p->flags & PTEM_HAVE_WINSIZE);
		mblk_t *mb;

		if (!pullupmsg(bp, sizeof(*ws)))
			goto nak;
		if (!(mb = allocb(1, BPRI_MED))) {
			error = EAGAIN;
			goto nak;
		}
		ws = (typeof(ws)) bp->b_rptr;
		if (ws->ws_col != p->ws.ws_col) {
			if ((p->ws.ws_col = ws->ws_col))
				zeroed = 0;
			changed = 1;
		}
		if (ws->ws_row != p->ws.ws_row) {
			if ((p->ws.ws_row = ws->ws_row))
				zeroed = 0;
			changed = 1;
		}
		if (ws->ws_xpixel != p->ws.ws_xpixel) {
			if ((p->ws.ws_xpixel = ws->ws_xpixel))
				zeroed = 0;
			changed = 1;
		}
		if (ws->ws_ypixel != p->ws.ws_ypixel) {
			if ((p->ws.ws_ypixel = ws->ws_ypixel))
				zeroed = 0;
			changed = 1;
		}
		if (zeroed)
			p->flags &= ~PTEM_HAVE_WINSIZE;
		else
			p->flags |= PTEM_HAVE_WINSIZE;
		if (changed) {
			mb->b_datap->db_type = M_SIG;
			*mb->b_wptr++ = SIGWINCH;
			putnext(q, mb);
		} else
			freeb(mb);
		count = 0;
		goto ack;
	}
#ifdef TIOCSIGNAL
	case TIOCSIGNAL:
#endif				/* TIOCSIGNAL */
#ifdef O_TIOCSIGNAL
	case O_TIOCSIGNAL:
#endif				/* O_TIOCSIGNAL */
	{
		/* Deliver the requested signal number upstream as M_PCSIG. */
		uint s;

		if (!pullupmsg(bp, sizeof(s)))
			goto nak;
		if ((s = *(uint *) bp->b_rptr) > _NSIG || s == 0)
			goto nak;
		if (!putnextctl1(q, M_PCSIG, s)) {
			error = EAGAIN;
			goto nak;
		}
		count = 0;
		goto ack;
	}
#ifdef TIOCREMOTE
	case TIOCREMOTE:
#endif				/* TIOCREMOTE */
#ifdef O_TIOCREMOTE
	case O_TIOCREMOTE:
#endif				/* O_TIOCREMOTE */
	{
		/* Toggle remote mode: tell the module above (via M_CTL)
		   whether to perform canonical processing. */
		struct ptem *p = PTEM_PRIV(q);
		struct iocblk *ctl;
		mblk_t *mb;

		if (!pullupmsg(bp, sizeof(uint)))
			goto nak;
		if (!(mb = allocb(sizeof(*ctl), BPRI_MED))) {
			error = EAGAIN;
			goto nak;
		}
		mb->b_datap->db_type = M_CTL;
		ctl = (typeof(ctl)) mb->b_rptr;
		mb->b_wptr += sizeof(*ctl);
		/* FIX: zero the whole iocblk, not just sizeof(pointer). */
		bzero(ctl, sizeof(*ctl));
		if (*(uint *) bp->b_rptr) {
			ctl->ioc_cmd = MC_NO_CANON;
			p->flags |= PTEM_REMOTE_MODE;
		} else {
			ctl->ioc_cmd = MC_DO_CANON;
			p->flags &= ~PTEM_REMOTE_MODE;
		}
		putnext(q, mb);
		count = 0;
		goto ack;
	}
	default:
		break;
	}
	/* Not an ioctl we handle: pass it on upstream. */
	putnext(q, mp);
	return;
	/*
	 * FIX: the ack and nak label bodies were swapped (ack set M_IOCNAK
	 * and nak set M_IOCACK); they now mirror ptem_w_msg().
	 */
      ack:
	mp->b_datap->db_type = M_IOCACK;
	ioc->ioc_error = 0;
	ioc->ioc_rval = rval;
	ioc->ioc_count = count;
      reply:
	qreply(q, mp);
	return;
      nak:
	mp->b_datap->db_type = M_IOCNAK;
	ioc->ioc_error = error;
	ioc->ioc_rval = -1;
	ioc->ioc_count = 0;
	goto reply;
}
/**
 * ptem_w_msg - process a message on the write side
 * @q: write queue
 * @mp: message to process
 *
 * Returns 1 when the caller (putp or srvp) needs to queue or requeue the
 * message.  Returns 0 when the message has been disposed and the caller must
 * release its reference to mp.
 *
 * Keep this function out of the way of the fastpath.
 */
static streams_noinline int
ptem_w_msg(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		if ((p->flags & PTEM_OUTPUT_STOPPED) || (q->q_first != NULL)
		    || (q->q_flag & QSVCBUSY) || (!bcanputnext(q, mp->b_band)))
			return (1);
		putnext(q, mp);
		return (0);
	}
	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;
		int error = EINVAL;
		int rval = 0;
		int count = 0;
		mblk_t *bp, *cp;

		/* The Stream head is set to recognize all transparent
		   terminal input-output controls and pass them downstream as
		   though they were I_STR input-output controls.  There is
		   also the opportunity to register input-output controls with
		   the Stream head using the TIOC_REPLY message. */
		if (ioc->ioc_count == TRANSPARENT) {
			__swerr();
			goto nak;
		}
		if ((bp = mp->b_cont) == NULL)
			goto nak;
		switch (ioc->ioc_cmd) {
		case TCSETAF:
			/* Note, if properly handled the M_FLUSH message will
			   never be queued and upon successful return from
			   this function, we have already processed the
			   read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETAW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETA:
		{
			struct termio *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termio)))
				goto nak;
			c = (typeof(c)) bp->b_rptr;
			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}
				/* termio only carries the low 16 bits. */
				p->c.c_iflag =
				    (p->c.c_iflag & 0xffff0000) | c->c_iflag;
				p->c.c_oflag =
				    (p->c.c_oflag & 0xffff0000) | c->c_oflag;
				p->c.c_cflag =
				    (p->c.c_cflag & 0xffff0000) | c->c_cflag;
				p->c.c_lflag =
				    (p->c.c_lflag & 0xffff0000) | c->c_lflag;
				p->c.c_line = c->c_line;
				bcopy(c->c_cc, p->c.c_cc, NCC);
				putnext(q, cp);
			}
			goto ack;
		}
		case TCSETSF:
			/* Note, if properly handled the M_FLUSH message will
			   never be queued and upon successful return from
			   this function, we have already processed the
			   read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETSW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETS:
		{
			struct termios *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termios)))
				goto nak;
			c = (typeof(c)) bp->b_rptr;
			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}
				p->c = *c;
				putnext(q, cp);
			}
			goto ack;
		}
		case TCGETA:
		{
			struct termio *c;
			extern void __struct_termio_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termio))
				__struct_termio_is_too_large_for_fastbuf();
			count = sizeof(*c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			c = (typeof(c)) bp->b_rptr;
			c->c_iflag = p->c.c_iflag;
			c->c_oflag = p->c.c_oflag;
			c->c_cflag = p->c.c_cflag;
			c->c_lflag = p->c.c_lflag;
			c->c_line = p->c.c_line;
			/* FIX: copy into the reply buffer; the original
			   copied p->c.c_cc onto itself, leaving the
			   returned c_cc uninitialized. */
			bcopy(p->c.c_cc, c->c_cc, NCC);
			goto ack;
		}
		case TCGETS:
		{
			extern void __struct_termios_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termios))
				__struct_termios_is_too_large_for_fastbuf();
			count = sizeof(p->c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct termios *) bp->b_rptr) = p->c;
			goto ack;
		}
		case TIOCGWINSZ:
		{
			extern void __struct_winsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct winsize))
				__struct_winsize_is_too_large_for_fastbuf();
			count = sizeof(p->ws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct winsize *) bp->b_rptr) = p->ws;
			goto ack;
		}
#ifdef JWINSIZE
		case JWINSIZE:
		{
			struct jwinsize *jws;
			extern void __struct_jwinsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct jwinsize))
				__struct_jwinsize_is_too_large_for_fastbuf();
			/* always have room in a fastbuf */
			count = sizeof(*jws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			jws = (typeof(jws)) bp->b_rptr;
			jws->bytesx = p->ws.ws_col;
			jws->bytesy = p->ws.ws_row;
			jws->bitsx = p->ws.ws_xpixel;
			jws->bitsy = p->ws.ws_ypixel;
			goto ack;
		}
#endif				/* JWINSIZE */
		case TIOCSWINSZ:
		{
			struct winsize *ws;
			int changed = 0;
			int zeroed = !(p->flags & PTEM_HAVE_WINSIZE);
			mblk_t *mb;

			if (!pullupmsg(bp, sizeof(*ws)))
				goto nak;
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			if (!(mb = allocb(1, BPRI_MED))) {
				freemsg(cp);
				error = EAGAIN;
				goto nak;
			}
			ws = (typeof(ws)) bp->b_rptr;
			if (ws->ws_col != p->ws.ws_col) {
				if ((p->ws.ws_col = ws->ws_col))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_row != p->ws.ws_row) {
				if ((p->ws.ws_row = ws->ws_row))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_xpixel != p->ws.ws_xpixel) {
				if ((p->ws.ws_xpixel = ws->ws_xpixel))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_ypixel != p->ws.ws_ypixel) {
				if ((p->ws.ws_ypixel = ws->ws_ypixel))
					zeroed = 0;
				changed = 1;
			}
			if (zeroed)
				p->flags &= ~PTEM_HAVE_WINSIZE;
			else
				p->flags |= PTEM_HAVE_WINSIZE;
			if (changed) {
				mb->b_datap->db_type = M_SIG;
				*mb->b_wptr++ = SIGWINCH;
				qreply(q, mb);
			} else
				freeb(mb);
			putnext(q, cp);	/* copy for pctk(4) */
			count = 0;
			goto ack;
		}
		case TCSBRK:
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			putnext(q, cp);
			count = 0;
			goto ack;
		default:
			goto nak;
		}
		break;
	      ack:
		mp->b_datap->db_type = M_IOCACK;
		ioc->ioc_error = 0;
		ioc->ioc_rval = rval;
		ioc->ioc_count = count;
		goto reply;
	      nak:
		mp->b_datap->db_type = M_IOCNAK;
		ioc->ioc_error = error;
		ioc->ioc_rval = -1;
		ioc->ioc_count = 0;
	      reply:
		qreply(q, mp);
		break;
	}
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
		}
		putnext(q, mp);
		break;
	default:
		if (mp->b_datap->db_type < QPCTL) {
			if ((q->q_first != NULL) || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band)))
				return (1);	/* (re)queue */
		}
		putnext(q, mp);
		break;
	}
	return (0);
}
/*
 * tswtcl_process - time-sliding-window three-color meter.
 *
 * Classifies the packet in *mpp as green, yellow or red by comparing a
 * smoothed average rate against the configured committed and peak rates,
 * using randomized probabilistic coloring in the intermediate bands.
 * Sets *next_action to the configured action for the chosen color and
 * updates per-color statistics.  Returns 0 on success, EINVAL on a bad
 * message.
 */
/* ARGSUSED */
int
tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
    ipp_action_id_t *next_action)
{
	ipha_t *ipha;
	hrtime_t now;
	ip6_t *ip6_hdr;
	uint32_t pkt_len;
	mblk_t *mp = *mpp;
	hrtime_t deltaT;
	uint64_t bitsinwin;
	uint32_t min = 0, additive, rnd;
	tswtcl_cfg_t *cfg_parms = tswtcl_data->cfg_parms;

	if (mp == NULL) {
		tswtcl0dbg(("tswtcl_process: null mp!\n"));
		atomic_add_64(&tswtcl_data->epackets, 1);
		return (EINVAL);
	}

	/* Skip a leading non-data block (e.g. M_CTL) if one is present. */
	if (mp->b_datap->db_type != M_DATA) {
		if ((mp->b_cont != NULL) &&
		    (mp->b_cont->b_datap->db_type == M_DATA)) {
			mp = mp->b_cont;
		} else {
			tswtcl0dbg(("tswtcl_process: no data\n"));
			atomic_add_64(&tswtcl_data->epackets, 1);
			return (EINVAL);
		}
	}

	/* Figure out the ToS/Traffic Class and length from the message */
	if ((mp->b_wptr - mp->b_rptr) < IP_SIMPLE_HDR_LENGTH) {
		if (!pullupmsg(mp, IP_SIMPLE_HDR_LENGTH)) {
			tswtcl0dbg(("tswtcl_process: pullup error\n"));
			atomic_add_64(&tswtcl_data->epackets, 1);
			return (EINVAL);
		}
	}
	ipha = (ipha_t *)mp->b_rptr;
	if (IPH_HDR_VERSION(ipha) == IPV4_VERSION) {
		pkt_len = ntohs(ipha->ipha_length);
	} else {
		/*
		 * NOTE(review): only IP_SIMPLE_HDR_LENGTH (20) bytes were
		 * pulled up, but the IPv6 header is 40 bytes — presumably
		 * callers guarantee a contiguous v6 header; confirm.
		 */
		ip6_hdr = (ip6_t *)mp->b_rptr;
		pkt_len = ntohs(ip6_hdr->ip6_plen) +
		    ip_hdr_length_v6(mp, ip6_hdr);
	}
	/* Convert into bits */
	pkt_len <<= 3;

	/* Get current time */
	now = gethrtime();

	/* Update the avg_rate and win_front tswtcl_data */
	mutex_enter(&tswtcl_data->tswtcl_lock);

	/* avg_rate = bits/sec and window in msec */
	bitsinwin = ((uint64_t)tswtcl_data->avg_rate *
	    cfg_parms->window / 1000) + pkt_len;

	deltaT = now - tswtcl_data->win_front + cfg_parms->nsecwindow;

	tswtcl_data->avg_rate = (uint64_t)bitsinwin * METER_SEC_TO_NSEC /
	    deltaT;
	tswtcl_data->win_front = now;

	if (tswtcl_data->avg_rate <= cfg_parms->committed_rate) {
		*next_action = cfg_parms->green_action;
	} else if (tswtcl_data->avg_rate <= cfg_parms->peak_rate) {
		/*
		 * Compute the probability:
		 *
		 * p0 = (avg_rate - committed_rate) / avg_rate
		 *
		 * Yellow with probability p0
		 * Green with probability (1 - p0)
		 */
		uint32_t aminusc;

		/* Get a random no. between 0 and avg_rate */
		(void) random_get_pseudo_bytes((uint8_t *)&additive,
		    sizeof (additive));
		rnd = min + (additive % (tswtcl_data->avg_rate - min + 1));

		aminusc = tswtcl_data->avg_rate - cfg_parms->committed_rate;
		if (aminusc >= rnd) {
			*next_action = cfg_parms->yellow_action;
		} else {
			*next_action = cfg_parms->green_action;
		}
	} else {
		/*
		 * Compute the probability:
		 *
		 * p1 = (avg_rate - peak_rate) / avg_rate
		 * p2 = (peak_rate - committed_rate) / avg_rate
		 *
		 * Red with probability p1
		 * Yellow with probability p2
		 * Green with probability (1 - (p1 + p2))
		 */
		uint32_t aminusp;

		/* Get a random no. between 0 and avg_rate */
		(void) random_get_pseudo_bytes((uint8_t *)&additive,
		    sizeof (additive));
		rnd = min + (additive % (tswtcl_data->avg_rate - min + 1));

		aminusp = tswtcl_data->avg_rate - cfg_parms->peak_rate;
		if (aminusp >= rnd) {
			*next_action = cfg_parms->red_action;
		} else if ((cfg_parms->pminusc + aminusp) >= rnd) {
			*next_action = cfg_parms->yellow_action;
		} else {
			*next_action = cfg_parms->green_action;
		}
	}
	mutex_exit(&tswtcl_data->tswtcl_lock);

	/* Update Stats */
	if (*next_action == cfg_parms->green_action) {
		atomic_add_64(&tswtcl_data->green_packets, 1);
		atomic_add_64(&tswtcl_data->green_bits, pkt_len);
	} else if (*next_action == cfg_parms->yellow_action) {
		atomic_add_64(&tswtcl_data->yellow_packets, 1);
		atomic_add_64(&tswtcl_data->yellow_bits, pkt_len);
	} else {
		ASSERT(*next_action == cfg_parms->red_action);
		atomic_add_64(&tswtcl_data->red_packets, 1);
		atomic_add_64(&tswtcl_data->red_bits, pkt_len);
	}
	return (0);
}
/*
 * Read-side put procedure.  It's responsible for applying the
 * packet filter and passing upstream messages on or discarding them
 * depending upon the results.
 *
 * Upstream messages can start with zero or more M_PROTO mblks
 * which are skipped over before executing the packet filter
 * on any remaining M_DATA mblks.
 */
static void
pfrput(queue_t *rq, mblk_t *mp)
{
	struct epacketfilt *pfp = (struct epacketfilt *)rq->q_ptr;
	mblk_t *mbp, *mpp;
	struct packdesc pd;
	int need;

	ASSERT(pfp);

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_DATA:
		/*
		 * Skip over protocol information and find the start
		 * of the message body, saving the overall message
		 * start in mpp.
		 */
		for (mpp = mp; mp && (DB_TYPE(mp) == M_PROTO);
		    mp = mp->b_cont)
			;

		/*
		 * Null body (exclusive of M_PROTO blocks) ==> accept.
		 * Note that a null body is not the same as an empty body.
		 */
		if (mp == NULL) {
			putnext(rq, mpp);
			break;
		}

		/*
		 * Pull the packet up to the length required by
		 * the filter.  Note that doing so destroys sharing
		 * relationships, which is unfortunate, since the
		 * results of pulling up here are likely to be useful
		 * for shared messages applied to a filter on a sibling
		 * stream.
		 *
		 * Most packet sources will provide the packet in two
		 * logical pieces: an initial header in a single mblk,
		 * and a body in a sequence of mblks hooked to the
		 * header.  We're prepared to deal with variant forms,
		 * but in any case, the pullup applies only to the body
		 * part.
		 */
		mbp = mp->b_cont;
		need = pfp->pf_PByteLen;
		if (mbp && (MBLKL(mbp) < need)) {
			int len = msgdsize(mbp);

			/* XXX discard silently on pullupmsg failure */
			if (pullupmsg(mbp, MIN(need, len)) == 0) {
				freemsg(mpp);
				break;
			}
		}

		/*
		 * Misalignment (not on short boundary) ==> reject,
		 * since the filter reads the packet as ushort_t words.
		 */
		if (((uintptr_t)mp->b_rptr & (sizeof (ushort_t) - 1)) ||
		    (mbp != NULL &&
		    ((uintptr_t)mbp->b_rptr & (sizeof (ushort_t) - 1)))) {
			freemsg(mpp);
			break;
		}

		/*
		 * These assignments are distasteful, but necessary,
		 * since the packet filter wants to work in terms of
		 * shorts.  Odd bytes at the end of header or data can't
		 * participate in the filtering operation.
		 */
		pd.pd_hdr = (ushort_t *)mp->b_rptr;
		pd.pd_hdrlen = (mp->b_wptr - mp->b_rptr) / sizeof (ushort_t);
		if (mbp) {
			pd.pd_body = (ushort_t *)mbp->b_rptr;
			pd.pd_bodylen = (mbp->b_wptr - mbp->b_rptr) /
			    sizeof (ushort_t);
		} else {
			pd.pd_body = NULL;
			pd.pd_bodylen = 0;
		}

		/*
		 * Apply the filter: accept (pass upstream) or drop.
		 */
		if (FilterPacket(&pd, pfp))
			putnext(rq, mpp);
		else
			freemsg(mpp);
		break;

	default:
		putnext(rq, mp);
		break;
	}
}