/* Enqueue a single packet 'mp' for sending */
static boolean_t
virtionet_send(virtionet_state_t *sp, mblk_t *mp)
{
    void *buf;
    size_t mlen;
    int idx;

    ASSERT(mp != NULL);

    mlen = msgsize(mp);
    ASSERT(mlen <= 2048);
    cmn_err(CE_CONT, "Sending message of %d bytes\n", (int)mlen);

    idx = sp->txq->vr_avail->idx;
    buf = sp->txbuf->addr + idx * 2048;
    /* mcopymsg() copies the message into buf and frees mp */
    mcopymsg(mp, buf);
    sp->txq->vr_desc[idx].len = mlen;
    ddi_dma_sync(sp->txbuf->hdl, idx * 2048, mlen, DDI_DMA_SYNC_FORDEV);
    sp->txq->vr_avail->idx++;
    /* The next is suboptimal, should calculate exact offset/size */
    ddi_dma_sync(sp->txq->vq_dma.hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
    return (B_TRUE);
}
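/*
 * Illustrative only: a minimal sketch of how a GLDv3-style tx entry
 * point might drain a chain of mblks through virtionet_send() above.
 * The name virtionet_tx() and its (void *, mblk_t *) shape are
 * assumptions for illustration; they are not part of the source shown.
 * Per the GLDv3 mc_tx contract, any unsent remainder of the chain is
 * returned to the framework for a later retry.
 */
static mblk_t *
virtionet_tx(void *arg, mblk_t *mp_chain)
{
    virtionet_state_t *sp = arg;
    mblk_t *mp;

    while ((mp = mp_chain) != NULL) {
        mp_chain = mp->b_next;  /* unlink before sending */
        mp->b_next = NULL;
        if (!virtionet_send(sp, mp)) {
            mp->b_next = mp_chain;  /* requeue; retry later */
            return (mp);
        }
    }
    return (NULL);
}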
/*
 * Copy a message
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_cmn_copymb(queue_t *q, mblk_t *mp, mblk_t **nmp, mblk_t *cmp,
    int rw_flag)
{
    int rval = SUCCESS;

    ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

    if ((*nmp = copymsg(cmp)) == NULL) {
        oplmsu_cmn_bufcall(q, mp, msgsize(cmp), rw_flag);
        rval = FAILURE;
    }
    return (rval);
}
void
mm_write(int fd, struct mm_msg *m)
{
    static byte lasttype = MM_RESET;
    int size = msgsize(m->type);
    byte status = 0x80 | (m->chan & 0x0F) | ((m->type & 0x07) << 4);
    byte buf[3] = {status, m->arg1, m->arg2};

    /* Real-time messages are a single status byte; send it as-is. */
    if (realtime(m->type)) {
        E("write", write(fd, &m->type, 1));
        return;
    }

    {
        byte *out = buf;

        /* Running status: omit the status byte if it repeats. */
        if (lasttype == status) {
            size--;
            out++;
        }
        for (;;) {
            int written = write(fd, out, size);

            if (!written)
                exit(0);
            if (written == size)
                break;
            if (written < 0)
                err(1, "write");
            size -= written;
            out += written;
        }
        /*
         * Remember the status byte just sent; without this update the
         * running-status branch above could never trigger.
         */
        lasttype = status;
    }
}
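/*
 * Worked example of the running-status path above (standard MIDI
 * semantics, not code from this program): two consecutive note-on
 * messages on channel 0 go out as 0x90 0x3C 0x40 followed by just
 * 0x3E 0x40, since the repeated 0x90 status byte is omitted.
 * Real-time messages bypass this path and never disturb lasttype.
 */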
uint32_t leaf_size(struct node *leaf)
{
    int i;
    uint32_t sz = 0U;

    nassert(leaf->n_children == 1);
    for (i = 0; i < leaf->n_children; i++) {
        /* Pivots only exist between children. */
        if (nessunlikely(i < (leaf->n_children - 1)))
            sz += msgsize(&leaf->pivots[i]);
        sz += sizeof(leaf->parts[i]);
        sz += lmb_memsize(leaf->parts[i].msgbuf);
    }
    sz += sizeof(*leaf);

    return sz;
}
uint32_t node_size(struct node *n)
{
    uint32_t size = 0U;

    size += (sizeof(*n));
    if (nessunlikely(n->height == 0)) {
        /* Leaf: account for the leaf message buffer. */
        size += lmb_memsize(n->u.l.buffer);
    } else {
        uint32_t i;

        /* Internal node: n_children - 1 pivots ... */
        for (i = 0; i < n->u.n.n_children - 1; i++)
            size += msgsize(&n->u.n.pivots[i]);

        /* ... plus one message buffer per child. */
        for (i = 0; i < n->u.n.n_children; i++)
            size += nmb_memsize(n->u.n.parts[i].buffer);
    }

    return size;
}
int
efe_send(efe_t *efep, mblk_t *mp)
{
    efe_ring_t *rp;
    uint16_t len;
    efe_desc_t *dp;
    uint16_t status;
    efe_buf_t *bp;

    ASSERT(mutex_owned(&efep->efe_txlock));

    rp = efep->efe_tx_ring;

    len = msgsize(mp);

    if (len > ETHERMAX + VLAN_TAGSZ) {
        efep->efe_oerrors++;
        efep->efe_macxmt_errors++;
        freemsg(mp);
        return (DDI_SUCCESS);
    }

    dp = GETDESC(rp, efep->efe_tx_desc);
    SYNCDESC(rp, efep->efe_tx_desc, DDI_DMA_SYNC_FORKERNEL);

    status = GETDESC16(efep->efe_tx_ring, &dp->d_status);

    /* Stop if device owns descriptor */
    if (status & TXSTAT_OWNER) {
        return (DDI_FAILURE);
    }

    bp = GETBUF(rp, efep->efe_tx_desc);
    mcopymsg(mp, bp->b_kaddr);

    /*
     * Packets must contain at least ETHERMIN octets.
     * Padded octets are zeroed out prior to sending.
     */
    if (len < ETHERMIN) {
        bzero(bp->b_kaddr + len, ETHERMIN - len);
        len = ETHERMIN;
    }

    SYNCBUF(bp, DDI_DMA_SYNC_FORDEV);

    PUTDESC16(rp, &dp->d_status, TXSTAT_OWNER);
    PUTDESC16(rp, &dp->d_len, len);
    PUTDESC16(rp, &dp->d_control, TXCTL_LASTDESCR);

    SYNCDESC(rp, efep->efe_tx_desc, DDI_DMA_SYNC_FORDEV);

    efep->efe_opackets++;
    efep->efe_obytes += len;

    if (*bp->b_kaddr & 0x01) {
        if (bcmp(bp->b_kaddr, efe_broadcast, ETHERADDRL) == 0) {
            efep->efe_brdcstxmt++;
        } else {
            efep->efe_multixmt++;
        }
    }

    efep->efe_tx_desc = NEXTDESC(rp, efep->efe_tx_desc);

    return (DDI_SUCCESS);
}
static boolean_t
pcn_send(pcn_t *pcnp, mblk_t *mp)
{
    size_t len;
    pcn_buf_t *txb;
    pcn_tx_desc_t *tmd;
    int txsend;

    ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
    ASSERT(mp != NULL);

    len = msgsize(mp);

    if (len > ETHERVLANMTU) {
        pcnp->pcn_macxmt_errors++;
        freemsg(mp);
        return (B_TRUE);
    }

    if (pcnp->pcn_txavail < PCN_TXRECLAIM)
        pcn_reclaim(pcnp);

    if (pcnp->pcn_txavail == 0) {
        pcnp->pcn_wantw = B_TRUE;
        /* enable tx interrupt */
        PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_LTINTEN);
        return (B_FALSE);
    }

    txsend = pcnp->pcn_txsend;

    /*
     * We copy the packet to a single buffer. NetBSD sources suggest
     * that if multiple segments are ever used, VMware has a bug that
     * will only allow 8 segments to be used, while the physical chips
     * allow 16.
     */
    txb = pcnp->pcn_txbufs[txsend];
    mcopymsg(mp, txb->pb_buf);  /* frees mp! */

    pcnp->pcn_opackets++;
    pcnp->pcn_obytes += len;
    if (txb->pb_buf[0] & 0x1) {
        if (bcmp(txb->pb_buf, pcn_broadcast, ETHERADDRL) != 0)
            pcnp->pcn_multixmt++;
        else
            pcnp->pcn_brdcstxmt++;
    }

    tmd = &pcnp->pcn_txdescp[txsend];

    SYNCBUF(txb, len, DDI_DMA_SYNC_FORDEV);
    tmd->pcn_txstat = 0;
    tmd->pcn_tbaddr = txb->pb_paddr;

    /* PCNet wants the 2's complement of the length of the buffer */
    tmd->pcn_txctl = (~(len) + 1) & PCN_TXCTL_BUFSZ;
    tmd->pcn_txctl |= PCN_TXCTL_MBO;
    tmd->pcn_txctl |= PCN_TXCTL_STP | PCN_TXCTL_ENP | PCN_TXCTL_ADD_FCS |
        PCN_TXCTL_OWN | PCN_TXCTL_MORE_LTINT;

    SYNCTXDESC(pcnp, txsend, DDI_DMA_SYNC_FORDEV);

    pcnp->pcn_txavail--;
    pcnp->pcn_txsend = (txsend + 1) % PCN_TXRING;
    pcnp->pcn_txstall_time = gethrtime() + (5 * 1000000000ULL);

    pcn_csr_write(pcnp, PCN_CSR_CSR, PCN_CSR_TX | PCN_CSR_INTEN);

    return (B_TRUE);
}
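/*
 * Worked example of the two's-complement length encoding above
 * (arithmetic only, not driver source): for len = 60, (~60 + 1) is
 * -60, i.e. 0x...FFC4 in two's complement; masking with
 * PCN_TXCTL_BUFSZ keeps the low bits of that value, and the chip
 * negates them again to recover the 60-byte buffer size.
 */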