/*
 * ll_w_msg_put: - classify a write-side message at put time
 * @q: write queue
 * @mp: message to classify
 *
 * Returns -EAGAIN for ordinary message types (presumably telling the caller
 * to queue the message for the service procedure -- TODO confirm against the
 * put procedure).  M_FLUSH and unrecognized types are dispatched immediately.
 */
static fastcall inline int
ll_w_msg_put(queue_t *q, mblk_t *mp)
{
	if (likely(DB_TYPE(mp) == M_DATA))
		return (-EAGAIN);	/* fast path for data */
	if (likely(DB_TYPE(mp) == M_PROTO))
		return (-EAGAIN);	/* fast path for protocol */
	switch (DB_TYPE(mp)) {
	/* M_DATA and M_PROTO are already handled above; they are kept here
	 * so the grouped case labels remain complete. */
	case M_DATA:
	case M_HPDATA:
	case M_PROTO:
	case M_PCPROTO:
	case M_CTL:
	case M_PCCTL:
	case M_SIG:
	case M_PCSIG:
	case M_RSE:
	case M_PCRSE:
		return (-EAGAIN);
	case M_FLUSH:
		return ll_m_flush(q, mp);
	default:
		return ll_m_other(q, mp);
	}
}
/*
 * ll_r_msg_srv: - dispatch a read-side message from the service procedure
 * @q: read queue
 * @mp: message to dispatch
 *
 * Routes each STREAMS message type to its dedicated handler.  M_PROTO is
 * checked first as the expected common case on the read side.
 */
static fastcall noinline int
ll_r_msg_srv(queue_t *q, mblk_t *mp)
{
	struct ll *ll = LL_R_PRIV(q);

	/* Protocol messages dominate on the read side. */
	if (likely(DB_TYPE(mp) == M_PROTO))
		return ll_m_proto(ll, q, mp);

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_PCPROTO:
		return ll_m_proto(ll, q, mp);
	case M_DATA:
	case M_HPDATA:
		return ll_m_data(ll, q, mp);
	case M_CTL:
	case M_PCCTL:
		return ll_m_ctl(ll, q, mp);
	case M_SIG:
	case M_PCSIG:
		return ll_m_sig(q, mp);
	case M_RSE:
	case M_PCRSE:
		return ll_m_rse(q, mp);
	case M_FLUSH:
		return ll_m_flush(q, mp);
	default:
		return ll_m_other(q, mp);
	}
}
/**
 * ch_msg: - process message from queue
 * @q: queue
 * @mp: the message to process
 *
 * This simply flows data messages through with flow control and intercepts
 * all other messages.  When the channel is enabled, M_DATA is passed along
 * (subject to flow control) and an M_PROTO carrying CH_DATA_REQ is rewritten
 * in place to CH_DATA_IND before being passed along.  Everything else takes
 * the slow path.
 *
 * Returns 0 on success, -EBUSY when flow-controlled (caller presumably
 * requeues the message).
 */
static inline fastcall __hot int
ch_msg(queue_t *q, mblk_t *mp)
{
	struct ch *ch = CH_PRIV(q);

	if (test_bit(CH_ENABLE_BIT, &ch->flags)) {
		if (likely(DB_TYPE(mp) == M_DATA)) {
			if (likely(bcanputnext(q, mp->b_band))) {
				putnext(q, mp);
				return (0);
			}
			return (-EBUSY);
		} else if (DB_TYPE(mp) == M_PROTO) {
			/*
			 * FIX: this test used '=' (assignment) instead of
			 * '==', which overwrote the primitive with
			 * CH_DATA_REQ and made the condition always true.
			 */
			if (*(uint32_t *) mp->b_rptr == CH_DATA_REQ) {
				if (likely(bcanputnext(q, mp->b_band))) {
					/* convert request to indication in place */
					*(uint32_t *) mp->b_rptr = CH_DATA_IND;
					putnext(q, mp);
					return (0);
				}
				return (-EBUSY);
			}
			/* go slow */
		}
	}
	return ch_msg_slow(ch, q, mp);
}
/*
 * ip_helper_wput: write-side put procedure for the IP helper stream.
 *
 * Intercepts the SIOCSQPTR M_IOCTL to share the conn with the supplied
 * queue; all other messages are handed to the generic non-data write path.
 * Note iocp is only dereferenced after the M_IOCTL type check short-circuits.
 */
void
ip_helper_wput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

	if (DB_TYPE(mp) == M_IOCTL && iocp->ioc_cmd == SIOCSQPTR) {
		ip_helper_share_conn(q, mp, iocp->ioc_cr);
	} else {
		/* We only handle ioctl related messages here */
		ASSERT(DB_TYPE(mp) != M_DATA);
		ip_wput_nondata(q, mp);
	}
}
/*
 * av1394_async_bus_reset: post a bus-reset notification on the async queue.
 *
 * Refreshes target info, then queues an AV1394_M_BUS_RESET marker message.
 * NOTE(review): the function returns early when a_nopen > 0, i.e. the reset
 * message is only queued when there are no opens -- confirm this polarity is
 * intended.  The early return also skips AV1394_TNF_EXIT, leaving the TNF
 * enter probe unmatched -- verify that is acceptable.
 */
void
av1394_async_bus_reset(av1394_inst_t *avp)
{
	av1394_async_t *ap = &avp->av_a;
	mblk_t *bp;

	AV1394_TNF_ENTER(av1394_async_bus_reset);

	(void) av1394_async_update_targetinfo(avp);

	mutex_enter(&ap->a_mutex);
	if (ap->a_nopen > 0) {
		mutex_exit(&ap->a_mutex);
		return;
	}
	mutex_exit(&ap->a_mutex);

	/* queue up a bus reset message */
	if ((bp = allocb(1, BPRI_HI)) == NULL) {
		/* allocation failure is only recorded, not retried */
		TNF_PROBE_0(av1394_async_bus_reset_error_allocb,
		    AV1394_TNF_ASYNC_ERROR, "");
	} else {
		DB_TYPE(bp) = AV1394_M_BUS_RESET;
		av1394_async_putq_rq(avp, bp);
	}

	AV1394_TNF_EXIT(av1394_async_bus_reset);
}
/*
 * zap_rput: - read-side put procedure.
 *
 * Ordinary (non-priority) messages are queued whenever earlier messages are
 * already queued or the service routine is running, to preserve ordering.
 * Otherwise the message is processed directly, and queued if the direct
 * handler reports failure (non-zero).
 */
static streamscall int
zap_rput(queue_t *q, mblk_t *mp)
{
	if (!pcmsg(DB_TYPE(mp)) &&
	    (q->q_first || (q->q_flag & QSVCBUSY))) {
		/* maintain message order behind pending work */
		putq(q, mp);
	} else if (zap_r_msg(q, mp)) {
		/* direct processing declined the message */
		putq(q, mp);
	}
	return (0);
}
/*
 * zap_r_msg: - dispatch one read-side message.
 *
 * Data messages go straight to the data handler; everything else takes the
 * slow path.
 */
static inline fastcall int
zap_r_msg(queue_t *q, mblk_t *mp)
{
	switch (DB_TYPE(mp)) {
	case M_DATA:
		return zap_r_data(q, mp);
	default:
		return zap_r_msg_slow(q, mp);
	}
}
/*
 * handle_dump: collect diagnostic dumps from all subsystems into one
 * message and reply with it.
 *
 * Allocates a MAXPRINTBUF continuation block, lets each *_dump() routine
 * append its text (each receives the current write position and the end of
 * the buffer), then rewrites the control block header with the total size
 * and replies.  On allocation failure the original message is returned
 * unmodified as the reply.
 * NOTE(review): the dump routines are trusted not to overrun
 * buf + MAXPRINTBUF -- confirm each one bounds-checks against its second
 * argument.
 */
void
handle_dump(queue_t *q, mblk_t *mp)
{
	struct ctrl_args *ctrl_args;
	struct ctrl_device *ctrlfd;
	minor_t minor;
	int sum;
	unsigned char *buf;

	if ((mp->b_cont = allocb(MAXPRINTBUF, BPRI_MED)) == NULL) {
		printf("handle_ctrl: out of message blocks\n");
		qreply(q, mp);
		return;
	}

	buf = DB_BASE(mp->b_cont);
	sum = rpc_dump(buf, buf + MAXPRINTBUF);
	sum += port_dump(buf + sum, buf + MAXPRINTBUF);
	sum += kid_dump(buf + sum, buf + MAXPRINTBUF);
	sum += flip_netdump(buf + sum, buf + MAXPRINTBUF);
	sum += int_dump(buf + sum, buf + MAXPRINTBUF);
	sum += adr_dump(buf + sum, buf + MAXPRINTBUF);
	sum += ff_dump(buf + sum, buf + MAXPRINTBUF);

	/* expose the dump text in the continuation block */
	mp->b_cont->b_rptr = DB_BASE(mp->b_cont);
	mp->b_cont->b_wptr = mp->b_cont->b_rptr + sum;

	/* rewrite the leading control block with the status/size */
	ctrl_args = (struct ctrl_args *) DB_BASE(mp);
	ctrl_args->ctrl_status = sum;
	mp->b_rptr = DB_BASE(mp);
	mp->b_wptr = mp->b_rptr + sizeof(struct ctrl_args);
	DB_TYPE(mp) = M_PROTO;
	qreply(q, mp);
}
/*
 * Upstream messages are passed unchanged.
 * If a hangup occurs the target is no longer usable, so deprecate it.
 */
static int
wcmrput(queue_t *q, mblk_t *mp)
{
	if (DB_TYPE(mp) == M_HANGUP)
		/* Don't block waiting for outstanding operations to complete */
		srpop(q->q_stream->sd_vnode, B_FALSE);

	/* pass everything (including the M_HANGUP itself) upstream */
	putnext(q, mp);
	return (0);
}
/*
 * mac_lso_get: retrieve LSO metadata from an M_DATA mblk.
 *
 * Stores the HW_LSO bit (masked from the dblk checksum flags) in *flags,
 * and the LSO MSS in *mss, but only when LSO is actually set.  Either
 * output pointer may be NULL to skip it.
 */
void
mac_lso_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	if (flags != NULL) {
		*flags = DB_CKSUMFLAGS(mp) & HW_LSO;
		if ((*flags != 0) && (mss != NULL))
			*mss = (uint32_t)DB_LSOMSS(mp);
	}
}
/*
 * mac_hcksum_set: store hardware-checksum metadata in an M_DATA mblk.
 *
 * Writes the checksum start/stuff/end offsets, the checksum value, and the
 * flag bits into the dblk attached to @mp.  Meaning of each field depends
 * on the flag bits (see the matching mac_hcksum_get()).
 */
void
mac_hcksum_set(mblk_t *mp, uint32_t start, uint32_t stuff, uint32_t end,
    uint32_t value, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	DB_CKSUMSTART(mp) = (intptr_t)start;
	DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
	DB_CKSUMEND(mp) = (intptr_t)end;
	DB_CKSUMFLAGS(mp) = (uint16_t)flags;
	DB_CKSUM16(mp) = (uint16_t)value;
}
/**
 * ch_putp: - put procedure
 * @q: queue to which to put
 * @mp: message to put
 *
 * Simple flow-through queueing service procedure.  Ordinary messages are
 * queued behind pending work (to preserve order); otherwise the message is
 * processed directly and queued only if direct processing declines it.
 *
 * FIX: the putq() failure check was inverted -- putq(9F) returns non-zero
 * on success and 0 on failure, so the old code freed messages that had just
 * been queued successfully (use after queueing) and leaked ones that
 * failed.  Only a failed putq() is an error.
 */
static streamscall __hot int
ch_putp(queue_t *q, mblk_t *mp)
{
	if ((!pcmsg(DB_TYPE(mp)) && (q->q_first || (q->q_flag & QSVCBUSY)))
	    || ch_msg(q, mp)) {
		if (!putq(q, mp)) {
			/* should not happen: queue refused the message */
			swerr();
			freemsg(mp);
		}
	}
	return (0);
}
/*
 * nca_wput: write-side put procedure for the NCA module/driver.
 *
 * When q_ptr is unset the stream is acting as a pass-through module: the
 * NCA_SET_IF ioctl is rejected and everything else is passed along.
 * Otherwise only ND_SET/ND_GET ioctls are honored (via nd_getset); all
 * other ioctls are NAKed and all other message types are discarded.
 */
static void
nca_wput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;

	if (! (boolean_t)q->q_ptr) {
		/* module (pass-through) instance */
		iocp = (struct iocblk *)mp->b_rptr;
		if (DB_TYPE(mp) == M_IOCTL && iocp->ioc_cmd == NCA_SET_IF) {
			miocnak(q, mp, 0, ENOTSUP);
			return;
		}
		/* Module, passthrough */
		putnext(q, mp);
		return;
	}

	switch (DB_TYPE(mp)) {
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() formats the reply in mp on success */
			if (! nd_getset(q, nca_g_nd, mp)) {
				miocnak(q, mp, 0, ENOENT);
				return;
			}
			qreply(q, mp);
			break;
		default:
			miocnak(q, mp, 0, ENOTSUP);
			break;
		}
		break;
	default:
		/* no other traffic is supported on this stream */
		freemsg(mp);
		break;
	}
}
/*
 * Given a Multidata message block, return the Multidata metadata handle.
 * Returns NULL when the mblk is not of type M_MULTIDATA.
 */
multidata_t *
mmd_getmultidata(mblk_t *mp)
{
	ASSERT(mp != NULL);

	if (DB_TYPE(mp) == M_MULTIDATA) {
		multidata_t *mmd = (multidata_t *)mp->b_rptr;

		/* sanity-check the metadata before handing it out */
		ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
		return (mmd);
	}
	return (NULL);
}
/*
 * Schedule a uioamove() on a mblk. This is done as mblks are enqueued
 * by the protocol on the socket's rcv queue.
 *
 * Caller must be holding so_lock.
 */
void
sod_uioa_mblk_init(struct sodirect_s *sodp, mblk_t *mp, size_t msg_size)
{
	uioa_t *uioap = &sodp->sod_uioa;
	mblk_t *mp1 = mp;
	mblk_t *lmp = NULL;	/* last mblk successfully scheduled */

	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT(msg_size == msgdsize(mp));

	if (uioap->uioa_state & UIOA_ENABLED) {
		/* Uioa is enabled */

		if (msg_size > uioap->uio_resid) {
			/*
			 * There isn't enough uio space for the mblk_t chain
			 * so disable uioa such that this and any additional
			 * mblk_t data is handled by the socket and schedule
			 * the socket for wakeup to finish this uioa.
			 */
			uioap->uioa_state &= UIOA_CLR;
			uioap->uioa_state |= UIOA_FINI;
			return;
		}
		do {
			uint32_t len = MBLKL(mp1);

			if (!uioamove(mp1->b_rptr, len, UIO_READ, uioap)) {
				/* Scheduled, mark dblk_t as such */
				DB_FLAGS(mp1) |= DBLK_UIOA;
			} else {
				/* Error, turn off async processing */
				uioap->uioa_state &= UIOA_CLR;
				uioap->uioa_state |= UIOA_FINI;
				break;
			}
			lmp = mp1;
		} while ((mp1 = mp1->b_cont) != NULL);

		if (mp1 != NULL || uioap->uio_resid == 0) {
			/* Break the mblk chain if necessary. */
			if (mp1 != NULL && lmp != NULL) {
				/*
				 * Unscheduled remainder continues via
				 * b_next; the scheduled prefix is cut off
				 * at lmp.
				 */
				mp->b_next = mp1;
				lmp->b_cont = NULL;
			}
		}
	}
}
/*
 * uftdi_rxerr_put: convert FTDI line-status errors into M_BREAK messages.
 *
 * For each questionable received byte (or a single NUL when there is no
 * data) an M_BREAK message carrying the DS_* error flags and the byte is
 * appended to *rx_mpp.  On allocation failure the remaining bad data is
 * discarded.
 */
static void
uftdi_rxerr_put(mblk_t **rx_mpp, mblk_t *data, uint8_t lsr)
{
	uchar_t errflg;

	if (lsr & FTDI_LSR_STATUS_BI) {
		/*
		 * parity and framing errors only "count" if they
		 * occur independently of a break being received.
		 */
		lsr &= ~(uint8_t)(FTDI_LSR_STATUS_PE | FTDI_LSR_STATUS_FE);
	}

	errflg =
	    ((lsr & FTDI_LSR_STATUS_OE) ? DS_OVERRUN_ERR : 0) |
	    ((lsr & FTDI_LSR_STATUS_PE) ? DS_PARITY_ERR : 0) |
	    ((lsr & FTDI_LSR_STATUS_FE) ? DS_FRAMING_ERR : 0) |
	    ((lsr & FTDI_LSR_STATUS_BI) ? DS_BREAK_ERR : 0);

	/*
	 * If there's no actual data, we send a NUL character along
	 * with the error flags.  Otherwise, the data mblk contains
	 * some number of highly questionable characters.
	 *
	 * According to FTDI tech support, there is no synchronous
	 * error reporting i.e. we cannot assume that only the
	 * first character in the mblk is bad -- so we treat all
	 * of them as if they have the error noted in the LSR.
	 */
	do {
		mblk_t *mp;
		uchar_t c = (MBLKL(data) == 0) ? '\0' : *data->b_rptr++;

		if ((mp = allocb(2, BPRI_HI)) != NULL) {
			DB_TYPE(mp) = M_BREAK;
			*mp->b_wptr++ = errflg;
			*mp->b_wptr++ = c;
			uftdi_put_tail(rx_mpp, mp);
		} else {
			/*
			 * low memory - just discard the bad data
			 */
			data->b_rptr = data->b_wptr;
			break;
		}
	} while (MBLKL(data) > 0);
}
/*
 * mqi_show_tables: collect the names of tables matching @flags into @buf,
 * keeping the array sorted case-insensitively (insertion sort).
 *
 * Returns the number of names stored, or -1 with errno set (EOVERFLOW when
 * more than @len tables match).  The stored pointers reference the hash
 * table's own name storage; nothing is copied.
 */
int
mqi_show_tables(uint32_t flags, char **buf, int len)
{
	mqi_handle_t h;
	mqi_table_t *tbl;
	mqi_db_t *db;
	void *data;
	char *name;
	void *cursor;
	int i = 0;
	int j;

	MDB_CHECKARG(buf && len > 0, -1);
	MDB_PREREQUISITE(dbs && ndb > 0, -1);

	MDB_HASH_TABLE_FOR_EACH_WITH_KEY(table_name_hash, data, name, cursor) {
		if (i >= len) {
			errno = EOVERFLOW;
			return -1;
		}

		/*
		 * NOTE(review): 'data - NULL' converts the stored pointer to
		 * an integer handle via pointer subtraction -- presumably the
		 * hash stores handles as pointers; confirm against the
		 * insertion site.
		 */
		if ((h = data - NULL) == MQI_HANDLE_INVALID)
			continue;

		if (!(tbl = mdb_handle_get_data(table_handle, h)) ||
		    !(db = tbl->db))
			continue;

		if (!(DB_TYPE(db) & flags))
			continue;

		/* find the insertion point and shift the tail up by one */
		for (j = 0; j < i; j++) {
			if (strcasecmp(name, buf[j]) < 0) {
				memmove(buf + (j+1), buf + j,
				    sizeof(char *) * (i-j));
				break;
			}
		}

		buf[j] = name;
		i++;
	}

	return i;
}
static noinline fastcall zap_w_msg_slow(queue_t *q, mblk_t *mp) { switch (DB_TYPE(mp)) { case M_DATA: return zap_w_data_slow(q, mp); case M_PROTO: case M_PCPROTO: return zap_w_proto(q, mp); case M_FLUSH: return zap_w_flush(q, mp); case M_IOCTL: case M_IOCDATA: return zap_w_ioctl(q, mp); case M_CTL: case M_PCCTL: return zap_w_ctl(q, mp); } freemsg(mp); return (0); }
/*
 * sl_w_msg: - write-side message handling for the multiplexer.
 *
 * Ioctls are handled locally; everything else is relayed to the peer's
 * read queue when one is linked (subject to flow control, returning -EBUSY
 * when blocked).  Messages with no peer to receive them are freed.
 */
static int
sl_w_msg(queue_t *q, mblk_t *mp)
{
	struct sl *mux = SL_PRIV(q);

	switch (DB_TYPE(mp)) {
	case M_IOCTL:
		return sl_w_ioctl(mux, q, mp);
	case M_IOCDATA:
		return sl_w_iocdata(mux, q, mp);
	}

	if (mux->other && mux->other->rq) {
		if (bcanputnext(mux->other->rq, mp->b_band)) {
			putnext(mux->other->rq, mp);
			return (0);
		}
		/* flow controlled: let the caller requeue */
		return (-EBUSY);
	}

	/* no peer linked: drop */
	freemsg(mp);
	return (0);
}
/*
 * log_makemsg: build a log message (log_ctl_t header + text payload).
 *
 * Small messages are satisfied from the preallocated log_freeq when
 * possible (always required at interrupt level, since allocb() cannot be
 * used there); otherwise a fresh two-mblk chain is allocated.  The text is
 * copied in and forcibly NUL-terminated.  Returns NULL when no buffer can
 * be obtained.
 *
 * NOTE(review): the bcopy of 'size - 1' bytes assumes size >= 1 -- confirm
 * callers never pass size == 0.
 */
mblk_t *
log_makemsg(int mid, int sid, int level, int sl, int pri, void *msg,
    size_t size, int on_intr)
{
	mblk_t *mp = NULL;
	mblk_t *mp2;
	log_ctl_t *lc;

	/* prefer a recycled buffer for small messages */
	if (size <= LOG_MSGSIZE &&
	    (on_intr || log_freeq->q_count > log_freeq->q_lowat))
		mp = getq_noenab(log_freeq, 0);

	if (mp == NULL) {
		/* cannot allocb() at interrupt level */
		if (on_intr ||
		    (mp = allocb(sizeof (log_ctl_t), BPRI_HI)) == NULL ||
		    (mp2 = allocb(MAX(size, LOG_MSGSIZE), BPRI_HI)) == NULL) {
			/* freemsg(NULL) is a no-op */
			freemsg(mp);
			return (NULL);
		}
		DB_TYPE(mp) = M_PROTO;
		mp->b_wptr += sizeof (log_ctl_t);
		mp->b_cont = mp2;
	} else {
		/* recycled chain: reset the payload block */
		mp2 = mp->b_cont;
		mp2->b_wptr = mp2->b_rptr;
	}

	lc = (log_ctl_t *)mp->b_rptr;
	lc->mid = mid;
	lc->sid = sid;
	lc->level = level;
	lc->flags = sl;
	lc->pri = pri;

	/* copy the text and guarantee NUL termination */
	bcopy(msg, mp2->b_wptr, size - 1);
	mp2->b_wptr[size - 1] = '\0';
	mp2->b_wptr += strlen((char *)mp2->b_wptr) + 1;

	return (mp);
}
/*
 * Create a Multidata message block.
 *
 * Allocates the multidata_t (preceded by its buf-info/free-routine header)
 * from mmd_cache, wraps it in an M_MULTIDATA mblk via desballoc() so the
 * cache entry is returned when the mblk is freed, and records @hdr_mp as
 * the header buffer.  Returns the metadata pointer, or NULL on allocation
 * failure (the cache entry is released on the desballoc failure path).
 */
multidata_t *
mmd_alloc(mblk_t *hdr_mp, mblk_t **mmd_mp, int kmflags)
{
	uchar_t *buf;
	multidata_t *mmd;
	uint_t mmd_mplen;
	struct mmd_buf_info *buf_info;

	ASSERT(hdr_mp != NULL);
	ASSERT(mmd_mp != NULL);

	/*
	 * Caller should never pass in a chain of mblks since we
	 * only care about the first one, hence the assertions.
	 */
	ASSERT(hdr_mp->b_cont == NULL);

	if ((buf = kmem_cache_alloc(mmd_cache, kmflags)) == NULL)
		return (NULL);

	buf_info = (struct mmd_buf_info *)buf;
	buf_info->frp.free_arg = (caddr_t)buf;

	/* the multidata_t lives immediately after the buf-info header */
	mmd = (multidata_t *)(buf_info + 1);
	mmd_mplen = sizeof (*mmd);

	if ((*mmd_mp = desballoc((uchar_t *)mmd, mmd_mplen, BPRI_HI,
	    &(buf_info->frp))) == NULL) {
		kmem_cache_free(mmd_cache, buf);
		return (NULL);
	}

	DB_TYPE(*mmd_mp) = M_MULTIDATA;
	(*mmd_mp)->b_wptr += mmd_mplen;
	mmd->mmd_dp = (*mmd_mp)->b_datap;
	mmd->mmd_hbuf = hdr_mp;

	return (mmd);
}
/*
 * ch_msg_slow: - slow-path handler for channel messages.
 *
 * Dispatches each supported message type to its handler; anything else is
 * a software error and is logged and freed.
 */
static noinline fastcall __unlikely int
ch_msg_slow(struct ch *ch, queue_t *q, mblk_t *mp)
{
	const int type = DB_TYPE(mp);

	if (type == M_DATA)
		return ch_m_data(ch, q, mp);
	if (type == M_PROTO || type == M_PCPROTO)
		return ch_m_proto(ch, q, mp);
	if (type == M_FLUSH)
		return ch_m_flush(q, mp);
	if (type == M_IOCTL || type == M_IOCDATA)
		return ch_m_ioctl(ch, q, mp);
	if (type == M_READ)
		return ch_m_read(ch, q, mp);

	/* unexpected type: log and drop */
	swerr();
	freemsg(mp);
	return (0);
}
/* ARGSUSED */ void ip_drop_packet(mblk_t *mp, boolean_t inbound, ill_t *arriving, ire_t *outbound_ire, struct kstat_named *counter, ipdropper_t *who_called) { mblk_t *ipsec_mp = NULL; ipsec_in_t *ii = NULL; ipsec_out_t *io = NULL; ipsec_info_t *in; uint8_t vers; if (mp == NULL) { /* * Return immediately - NULL packets should not affect any * statistics. */ return; } if (DB_TYPE(mp) == M_CTL) { in = (ipsec_info_t *)mp->b_rptr; if (in->ipsec_info_type == IPSEC_IN) ii = (ipsec_in_t *)in; else if (in->ipsec_info_type == IPSEC_OUT) io = (ipsec_out_t *)in; /* See if this is an ICMP packet (check for v4/v6). */ vers = (*mp->b_rptr) >> 4; if (vers != IPV4_VERSION && vers != IPV6_VERSION) { /* * If not, it's some other sort of M_CTL to be freed. * For now, treat it like an ordinary packet. */ ipsec_mp = mp; mp = mp->b_cont; } }
/*
 * mac_hcksum_get: retrieve hardware-checksum metadata from an M_DATA mblk.
 *
 * The dblk flags (masked with HCK_FLAGS) are always returned via
 * @flags_ptr; the offsets and value are filled in only when a checksum was
 * actually computed (partial or full), and the start/stuff/end offsets
 * only for the partial-checksum case.  Any output pointer may be NULL.
 */
void
mac_hcksum_get(mblk_t *mp, uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags_ptr)
{
	uint32_t flags;

	ASSERT(DB_TYPE(mp) == M_DATA);

	flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
	if ((flags & (HCK_PARTIALCKSUM | HCK_FULLCKSUM)) != 0) {
		if (value != NULL)
			*value = (uint32_t)DB_CKSUM16(mp);
		if ((flags & HCK_PARTIALCKSUM) != 0) {
			if (start != NULL)
				*start = (uint32_t)DB_CKSUMSTART(mp);
			if (stuff != NULL)
				*stuff = (uint32_t)DB_CKSUMSTUFF(mp);
			if (end != NULL)
				*end = (uint32_t)DB_CKSUMEND(mp);
		}
	}
	if (flags_ptr != NULL)
		*flags_ptr = flags;
}
static noinline fastcall zap_r_msg_slow(queue_t *q, mblk_t *mp) { switch (DB_TYPE(mp)) { case M_DATA: return zap_r_data_slow(q, mp); case M_PROTO: case M_PCPROTO: return zap_r_proto(q, mp); case M_FLUSH: return zap_r_flush(q, mp); case M_COPYIN: case M_COPYOUT: return zap_r_ioctl(q, mp); case M_CTL: case M_PCCTL: return zap_r_ctl(q, mp); case M_SIG: case M_PCSIG: return zap_r_sig(q, mp); } freemsg(mp); return (0); }
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	vmxnet3_txstatus status = VMXNET3_TX_OK;
	mblk_t *mp;

	ASSERT(mps != NULL);

	do {
		vmxnet3_offload_t ol;
		int pullup;

		/* detach the head of the chain */
		mp = mps;
		mps = mp->b_next;
		mp->b_next = NULL;

		if (DB_TYPE(mp) != M_DATA) {
			/*
			 * PR #315560: Solaris might pass M_PROTO mblks for some reason.
			 * Drop them because we don't understand them and because their
			 * contents are not Ethernet frames anyway.
			 */
			ASSERT(B_FALSE);
			freemsg(mp);
			continue;
		}

		/*
		 * Prepare the offload while we're still handling the original
		 * message -- msgpullup() discards the metadata afterwards.
		 */
		pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
		if (pullup) {
			mblk_t *new_mp = msgpullup(mp, pullup);
			freemsg(mp);
			if (new_mp) {
				mp = new_mp;
			} else {
				/* pullup failed: packet is dropped */
				continue;
			}
		}

		/*
		 * Try to map the message in the Tx ring.
		 * This call might fail for non-fatal reasons.
		 */
		status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
		if (status == VMXNET3_TX_PULLUP) {
			/*
			 * Try one more time after flattening
			 * the message with msgpullup().
			 */
			if (mp->b_cont != NULL) {
				mblk_t *new_mp = msgpullup(mp, -1);
				freemsg(mp);
				if (new_mp) {
					mp = new_mp;
					status = vmxnet3_tx_one(dp, txq, &ol,
					    mp, B_TRUE);
				} else {
					continue;
				}
			}
		}
		if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
			/* Fatal failure, drop it */
			freemsg(mp);
		}
	} while (mps && status != VMXNET3_TX_RINGFULL);

	if (status == VMXNET3_TX_RINGFULL) {
		/* re-chain the unsent head for the caller to retry */
		mp->b_next = mps;
		mps = mp;
	} else {
		ASSERT(!mps);
	}

	/* Notify the device */
	mutex_enter(&dp->txLock);
	if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
		txqCtrl->txNumDeferred = 0;
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
	}
	mutex_exit(&dp->txLock);

	return mps;
}
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp, vmxnet3_offload_t *ol,
    mblk_t *mp)
{
	int ret = 0;
	uint32_t start, stuff, value, flags;
#if defined(OPEN_SOLARIS) || defined(SOL11)
	uint32_t lso_flag, mss;
#endif

	/* default: no offload */
	ol->om = VMXNET3_OM_NONE;
	ol->hlen = 0;
	ol->msscof = 0;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
#if defined(OPEN_SOLARIS) || defined(SOL11)
	mac_lso_get(mp, &mss, &lso_flag);
	if (flags || lso_flag) {
#else
	if (flags) {
#endif
		struct ether_vlan_header *eth = (void *) mp->b_rptr;
		uint8_t ethLen;

		if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
			ethLen = sizeof(struct ether_vlan_header);
		} else {
			ethLen = sizeof(struct ether_header);
		}

		VMXNET3_DEBUG(dp, 4,
		    "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
		    flags, ethLen, start, stuff, value);

#if defined(OPEN_SOLARIS) || defined(SOL11)
		if (lso_flag & HW_LSO) {
#else
		if (flags & HCK_PARTIALCKSUM) {
			ol->om = VMXNET3_OM_CSUM;
			ol->hlen = start + ethLen;
			ol->msscof = stuff + ethLen;
		}
		if (flags & HW_LSO) {
#endif
			mblk_t *mblk = mp;
			uint8_t *ip, *tcp;
			uint8_t ipLen, tcpLen;

			/*
			 * Copy e1000g's behavior:
			 * - Do not assume all the headers are in the same mblk.
			 * - Assume each header is always within one mblk.
			 * - Assume the ethernet header is in the first mblk.
			 */
			ip = mblk->b_rptr + ethLen;
			if (ip >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				ip = mblk->b_rptr;
			}
			ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
			tcp = ip + ipLen;
			if (tcp >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				tcp = mblk->b_rptr;
			}
			tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
			/* careful, '>' instead of '>=' here */
			if (tcp + tcpLen > mblk->b_wptr) {
				mblk = mblk->b_cont;
			}

			ol->om = VMXNET3_OM_TSO;
			ol->hlen = ethLen + ipLen + tcpLen;
#if defined(OPEN_SOLARIS) || defined(SOL11)
			ol->msscof = mss;
#else
			/* OpenSolaris fills 'value' with the MSS but Solaris doesn't. */
			ol->msscof = DB_LSOMSS(mp);
#endif
			/* headers spilled past the first mblk: ask for pullup */
			if (mblk != mp) {
				ret = ol->hlen;
			}
		}
#if defined(OPEN_SOLARIS) || defined(SOL11)
		else if (flags & HCK_PARTIALCKSUM) {
			ol->om = VMXNET3_OM_CSUM;
			ol->hlen = start + ethLen;
			ol->msscof = stuff + ethLen;
		}
#endif
	}

	return ret;
}

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_one --
 *
 *    Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Results:
 *    VMXNET3_TX_OK if everything went well.
 *    VMXNET3_TX_RINGFULL if the ring is nearly full.
 *    VMXNET3_TX_PULLUP if the msg is overfragmented.
 *    VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *    The ring is filled if VMXNET3_TX_OK is returned.
 *
 *---------------------------------------------------------------------------
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq,
    vmxnet3_offload_t *ol, mblk_t *mp, boolean_t retry)
{
	int ret = VMXNET3_TX_OK;
	unsigned int frags = 0, totLen = 0;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	Vmxnet3_GenericDesc *txDesc;
	uint16_t sopIdx, eopIdx;
	uint8_t sopGen, curGen;
	mblk_t *mblk;

	mutex_enter(&dp->txLock);

	sopIdx = eopIdx = cmdRing->next2fill;
	sopGen = cmdRing->gen;
	/* descriptors are published with the inverted generation bit until
	 * the SOP descriptor flip makes the whole packet visible */
	curGen = !cmdRing->gen;

	for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
		unsigned int len = MBLKL(mblk);
		ddi_dma_cookie_t cookie;
		uint_t cookieCount;

		if (len) {
			totLen += len;
		} else {
			continue;
		}

		if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
		    (caddr_t) mblk->b_rptr, len,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
		    &cookie, &cookieCount) != DDI_DMA_MAPPED) {
			VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
			ret = VMXNET3_TX_FAILURE;
			goto error;
		}
		ASSERT(cookieCount);

		do {
			uint64_t addr = cookie.dmac_laddress;
			size_t len = cookie.dmac_size;

			do {
				uint32_t dw2, dw3;
				size_t chunkLen;

				ASSERT(!txq->metaRing[eopIdx].mp);
				ASSERT(cmdRing->avail - frags);

				if (frags >= cmdRing->size - 1 ||
				    (ol->om != VMXNET3_OM_TSO &&
				    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
					if (retry) {
						VMXNET3_DEBUG(dp, 2,
						    "overfragmented, frags=%u ring=%hu om=%hu\n",
						    frags, cmdRing->size,
						    ol->om);
					}
					ddi_dma_unbind_handle(dp->txDmaHandle);
					ret = VMXNET3_TX_PULLUP;
					goto error;
				}
				if (cmdRing->avail - frags <= 1) {
					dp->txMustResched = B_TRUE;
					ddi_dma_unbind_handle(dp->txDmaHandle);
					ret = VMXNET3_TX_RINGFULL;
					goto error;
				}

				/* a length of 0 in the descriptor encodes
				 * the maximum buffer size */
				if (len > VMXNET3_MAX_TX_BUF_SIZE) {
					chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
				} else {
					chunkLen = len;
				}

				frags++;
				eopIdx = cmdRing->next2fill;

				txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
				ASSERT(txDesc->txd.gen != cmdRing->gen);

				// txd.addr
				txDesc->txd.addr = addr;
				// txd.dw2
				dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ?
				    0 : chunkLen;
				dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
				txDesc->dword[2] = dw2;
				ASSERT(txDesc->txd.len == len ||
				    txDesc->txd.len == 0);
				// txd.dw3
				dw3 = 0;
				txDesc->dword[3] = dw3;

				VMXNET3_INC_RING_IDX(cmdRing,
				    cmdRing->next2fill);
				curGen = cmdRing->gen;

				addr += chunkLen;
				len -= chunkLen;
			} while (len);

			if (--cookieCount) {
				ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
			}
		} while (cookieCount);

		ddi_dma_unbind_handle(dp->txDmaHandle);
	}

	/* Update the EOP descriptor */
	txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
	txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* Update the SOP descriptor. Must be done last */
	txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
	if (ol->om == VMXNET3_OM_TSO && txDesc->txd.len != 0 &&
	    txDesc->txd.len < ol->hlen) {
		ret = VMXNET3_TX_FAILURE;
		goto error;
	}
	txDesc->txd.om = ol->om;
	txDesc->txd.hlen = ol->hlen;
	txDesc->txd.msscof = ol->msscof;
	/* make descriptor contents visible before flipping the gen bit */
	membar_producer();
	txDesc->txd.gen = sopGen;

	/* Update the meta ring & metadata */
	txq->metaRing[sopIdx].mp = mp;
	txq->metaRing[eopIdx].sopIdx = sopIdx;
	txq->metaRing[eopIdx].frags = frags;
	cmdRing->avail -= frags;
	if (ol->om == VMXNET3_OM_TSO) {
		txqCtrl->txNumDeferred +=
		    (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
	} else {
		txqCtrl->txNumDeferred++;
	}

	VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);

	goto done;

error:
	/* Reverse the generation bits */
	while (sopIdx != cmdRing->next2fill) {
		VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
		txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
		txDesc->txd.gen = !cmdRing->gen;
	}

done:
	mutex_exit(&dp->txLock);

	return ret;
}

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
/* NOTE(review): this is a second, identical definition of vmxnet3_tx --
 * confirm whether the duplication is intentional (e.g. a paste artifact). */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	vmxnet3_txstatus status = VMXNET3_TX_OK;
	mblk_t *mp;

	ASSERT(mps != NULL);

	do {
		vmxnet3_offload_t ol;
		int pullup;

		/* detach the head of the chain */
		mp = mps;
		mps = mp->b_next;
		mp->b_next = NULL;

		if (DB_TYPE(mp) != M_DATA) {
			/*
			 * PR #315560: Solaris might pass M_PROTO mblks for some reason.
			 * Drop them because we don't understand them and because their
			 * contents are not Ethernet frames anyway.
			 */
			ASSERT(B_FALSE);
			freemsg(mp);
			continue;
		}

		/*
		 * Prepare the offload while we're still handling the original
		 * message -- msgpullup() discards the metadata afterwards.
		 */
		pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
		if (pullup) {
			mblk_t *new_mp = msgpullup(mp, pullup);
			freemsg(mp);
			if (new_mp) {
				mp = new_mp;
			} else {
				/* pullup failed: packet is dropped */
				continue;
			}
		}

		/*
		 * Try to map the message in the Tx ring.
		 * This call might fail for non-fatal reasons.
		 */
		status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
		if (status == VMXNET3_TX_PULLUP) {
			/*
			 * Try one more time after flattening
			 * the message with msgpullup().
			 */
			if (mp->b_cont != NULL) {
				mblk_t *new_mp = msgpullup(mp, -1);
				freemsg(mp);
				if (new_mp) {
					mp = new_mp;
					status = vmxnet3_tx_one(dp, txq, &ol,
					    mp, B_TRUE);
				} else {
					continue;
				}
			}
		}
		if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
			/* Fatal failure, drop it */
			freemsg(mp);
		}
	} while (mps && status != VMXNET3_TX_RINGFULL);

	if (status == VMXNET3_TX_RINGFULL) {
		/* re-chain the unsent head for the caller to retry */
		mp->b_next = mps;
		mps = mp;
	} else {
		ASSERT(!mps);
	}

	/* Notify the device */
	mutex_enter(&dp->txLock);
	if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
		txqCtrl->txNumDeferred = 0;
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
	}
	mutex_exit(&dp->txLock);

	return mps;
}
/*
 * Detect dl attach, hold the dip to prevent it from detaching
 */
static int
drwput(queue_t *q, mblk_t *mp)
{
	struct drstate *dsp;
	union DL_primitives *dlp;
	dev_info_t *dip;

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_PCPROTO:
		break;
	default:
		/* only DLPI primitives are of interest */
		putnext(q, mp);
		return (0);
	}

	/* make sure size is sufficient for dl_primitive */
	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		putnext(q, mp);
		return (0);
	}

	dlp = (union DL_primitives *)mp->b_rptr;
	switch (dlp->dl_primitive) {
	case DL_ATTACH_REQ:
		/*
		 * Check for proper size of the message.
		 *
		 * If size is correct, get the ppa and attempt to
		 * hold the device assuming ppa is instance.
		 *
		 * If size is wrong, we can't get the ppa, but
		 * still increment dr_nfirst because the read side
		 * will get a error ack on DL_ATTACH_REQ.
		 */
		dip = NULL;
		dsp = q->q_ptr;
		/*
		 * NOTE(review): the minimum-size check uses DL_OK_ACK_SIZE
		 * for a DL_ATTACH_REQ -- confirm this is the intended bound.
		 */
		if (MBLKL(mp) >= DL_OK_ACK_SIZE) {
			dip = ddi_hold_devi_by_instance(dsp->dr_major,
			    dlp->attach_req.dl_ppa,
			    E_DDI_HOLD_DEVI_NOATTACH);
		}

		mutex_enter(&dsp->dr_lock);
		dsp->dr_dip[dsp->dr_nfirst] = dip;
		INCR(dsp->dr_nfirst);
		/*
		 * Check if ring buffer is full. If so, assert in debug
		 * kernel and produce a warning in non-debug kernel.
		 */
		ASSERT(dsp->dr_nfirst != dsp->dr_nlast);
		if (dsp->dr_nfirst == dsp->dr_nlast) {
			cmn_err(CE_WARN, "drcompat: internal buffer full");
		}
		mutex_exit(&dsp->dr_lock);
		break;
	default:
		break;
	}

	putnext(q, mp);
	return (0);
}
/*
 * drrput: read-side counterpart of drwput -- consume the dip that the
 * write side stashed for a DL_ATTACH_REQ.
 *
 * On a DL_OK_ACK for DL_ATTACH_REQ the queue is associated with the held
 * dip and the hold is released; on DL_ERROR_ACK the hold is simply
 * released.  A DL_OK_ACK for DL_DETACH_REQ dissociates the queue.
 * All messages are passed upstream unchanged.
 */
static int
drrput(queue_t *q, mblk_t *mp)
{
	struct drstate *dsp;
	union DL_primitives *dlp;
	dev_info_t *dip;

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_PCPROTO:
		break;
	default:
		putnext(q, mp);
		return (0);
	}

	/* make sure size is sufficient for dl_primitive */
	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		putnext(q, mp);
		return (0);
	}

	dlp = (union DL_primitives *)mp->b_rptr;
	switch (dlp->dl_primitive) {
	case DL_OK_ACK: {
		/* check for proper size, let upper layer deal with error */
		if (MBLKL(mp) < DL_OK_ACK_SIZE) {
			putnext(q, mp);
			return (0);
		}

		dsp = q->q_ptr;
		switch (dlp->ok_ack.dl_correct_primitive) {
		case DL_ATTACH_REQ:
			/*
			 * ddi_assoc_queue_with_devi() will hold dip,
			 * so release after association.
			 *
			 * dip is NULL means we didn't hold dip on read side.
			 * (unlikely, but possible), so we do nothing.
			 */
			mutex_enter(&dsp->dr_lock);
			dip = dsp->dr_dip[dsp->dr_nlast];
			dsp->dr_dip[dsp->dr_nlast] = NULL;
			INCR(dsp->dr_nlast);
			mutex_exit(&dsp->dr_lock);

			if (dip) {
				ddi_assoc_queue_with_devi(q, dip);
				ddi_release_devi(dip);
			}
			break;
		case DL_DETACH_REQ:
			ddi_assoc_queue_with_devi(q, NULL);
			break;
		default:
			break;
		}
		break;
	}
	case DL_ERROR_ACK:
		if (dlp->error_ack.dl_error_primitive != DL_ATTACH_REQ)
			break;

		dsp = q->q_ptr;
		mutex_enter(&dsp->dr_lock);
		dip = dsp->dr_dip[dsp->dr_nlast];
		dsp->dr_dip[dsp->dr_nlast] = NULL;
		INCR(dsp->dr_nlast);
		mutex_exit(&dsp->dr_lock);

		/*
		 * Release dip on attach failure
		 */
		if (dip) {
			ddi_release_devi(dip);
		}
		break;
	default:
		break;
	}

	putnext(q, mp);
	return (0);
}
/*
 * Message must be of type M_IOCTL or M_IOCDATA for this routine to be called.
 *
 * ptioc: handle the pseudo-terminal ioctls (window size queries and
 * updates, signal injection, and remote-mode switching) for the ptem
 * module.  @qside indicates which side (RDSIDE/WRSIDE) the message arrived
 * on, which determines where SIGWINCH and the PCKT copy are sent.
 *
 * FIX: the TIOCSWINSZ change-detection compared tp->wsz.ws_ypixel against
 * wb->ws_xpixel (wrong field), so a change in ws_ypixel alone went
 * undetected; it now compares against wb->ws_ypixel.
 */
static void
ptioc(queue_t *q, mblk_t *mp, int qside)
{
	struct ptem *tp;
	struct iocblk *iocp;
	struct winsize *wb;
	struct jwinsize *jwb;
	mblk_t *tmp;
	mblk_t *pckt_msgp;	/* message sent to the PCKT module */
	int error;

	iocp = (struct iocblk *)mp->b_rptr;
	tp = (struct ptem *)q->q_ptr;

	switch (iocp->ioc_cmd) {

	case JWINSIZE:
		/*
		 * For compatibility: If all zeros, NAK the message for dumb
		 * terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct jwinsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		if (iocp->ioc_count == TRANSPARENT)
			mcopyout(mp, NULL, sizeof (struct jwinsize),
			    NULL, tmp);
		else
			mioc2ack(mp, tmp, sizeof (struct jwinsize), 0);

		/* fill in the current window size */
		jwb = (struct jwinsize *)mp->b_cont->b_rptr;
		jwb->bytesx = tp->wsz.ws_col;
		jwb->bytesy = tp->wsz.ws_row;
		jwb->bitsx = tp->wsz.ws_xpixel;
		jwb->bitsy = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCGWINSZ:
		/*
		 * If all zeros NAK the message for dumb terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct winsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, tmp, sizeof (struct winsize), 0);

		wb = (struct winsize *)mp->b_cont->b_rptr;
		wb->ws_row = tp->wsz.ws_row;
		wb->ws_col = tp->wsz.ws_col;
		wb->ws_xpixel = tp->wsz.ws_xpixel;
		wb->ws_ypixel = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCSWINSZ:
		error = miocpullup(mp, sizeof (struct winsize));
		if (error != 0) {
			miocnak(q, mp, 0, error);
			return;
		}

		wb = (struct winsize *)mp->b_cont->b_rptr;
		/*
		 * Send a SIGWINCH signal if the row/col information has
		 * changed.
		 * (FIX: last comparison previously read wb->ws_xpixel.)
		 */
		if ((tp->wsz.ws_row != wb->ws_row) ||
		    (tp->wsz.ws_col != wb->ws_col) ||
		    (tp->wsz.ws_xpixel != wb->ws_xpixel) ||
		    (tp->wsz.ws_ypixel != wb->ws_ypixel)) {
			/*
			 * SIGWINCH is always sent upstream.
			 */
			if (qside == WRSIDE)
				(void) putnextctl1(RD(q), M_SIG, SIGWINCH);
			else if (qside == RDSIDE)
				(void) putnextctl1(q, M_SIG, SIGWINCH);
			/*
			 * Message may have come in as an M_IOCDATA; pass it
			 * to the master side as an M_IOCTL.
			 */
			mp->b_datap->db_type = M_IOCTL;
			if (qside == WRSIDE) {
				/*
				 * Need a copy of this message to pass on to
				 * the PCKT module, only if the M_IOCTL
				 * orginated from the slave side.
				 */
				if ((pckt_msgp = copymsg(mp)) == NULL) {
					miocnak(q, mp, 0, EAGAIN);
					return;
				}
				putnext(q, pckt_msgp);
			}
			/* record the new window size */
			tp->wsz.ws_row = wb->ws_row;
			tp->wsz.ws_col = wb->ws_col;
			tp->wsz.ws_xpixel = wb->ws_xpixel;
			tp->wsz.ws_ypixel = wb->ws_ypixel;
		}
		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;

	case TIOCSIGNAL: {
		/*
		 * This ioctl can emanate from the master side in remote
		 * mode only.
		 */
		int	sig;

		if (DB_TYPE(mp) == M_IOCTL && iocp->ioc_count != TRANSPARENT) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		if (DB_TYPE(mp) == M_IOCDATA || iocp->ioc_count != TRANSPARENT)
			sig = *(int *)mp->b_cont->b_rptr;
		else
			sig = (int)*(intptr_t *)mp->b_cont->b_rptr;

		if (sig < 1 || sig >= NSIG) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		/*
		 * Send an M_PCSIG message up the slave's read side and
		 * respond back to the master with an ACK or NAK as
		 * appropriate.
		 */
		if (putnextctl1(q, M_PCSIG, sig) == 0) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;
	}

	case TIOCREMOTE: {
		int	onoff;
		mblk_t	*mctlp;

		if (DB_TYPE(mp) == M_IOCTL) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		onoff = *(int *)mp->b_cont->b_rptr;

		/*
		 * Send M_CTL up using the iocblk format.
		 */
		mctlp = mkiocb(onoff ? MC_NO_CANON : MC_DO_CANON);
		if (mctlp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}
		mctlp->b_datap->db_type = M_CTL;
		putnext(q, mctlp);

		/*
		 * ACK the ioctl.
		 */
		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);

		/*
		 * Record state change.
		 */
		if (onoff)
			tp->state |= REMOTEMODE;
		else
			tp->state &= ~REMOTEMODE;
		return;
	}

	default:
		putnext(q, mp);
		return;
	}
}