Example #1
int
dl_unitdata_req( queue_t *q, mblk_t *m0, ushort type, caddr_t addr )
{
    union DL_primitives	*dl;
    struct llc		*llc;
    mblk_t		*m1, *m;
    ushort              len;

    /* len = msgdsize( m0 ) + sizeof( struct llc ); */

    if (( m1 = allocb( sizeof( struct llc ), BPRI_HI )) == NULL ) {
	cmn_err( CE_NOTE, "dl_unitdate_req NOMEM 1\n" );
	return( ENOMEM );
    }
    m1->b_wptr = m1->b_rptr + sizeof( struct llc );
    m1->b_datap->db_type = M_DATA;
    llc = (struct llc *)m1->b_rptr;

    llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
    llc->llc_control = LLC_UI;
    if ( type == ETHERTYPE_AARP ) {
	bcopy( aarp_org_code, llc->llc_org_code, sizeof( aarp_org_code ));
    } else if ( type == ETHERTYPE_AT ) {
	bcopy( at_org_code, llc->llc_org_code, sizeof( at_org_code ));
    } else {
	cmn_err( CE_NOTE, "dl_unitdate_req type %X\n", type );
	return( EINVAL );
    }
    llc->llc_ether_type = htons( type );
    linkb( m1, m0 );

    if (( m = allocb( DL_UNITDATA_REQ_SIZE + ETHERADDRL + sizeof( ushort ),
		      BPRI_HI )) == NULL ) {
	cmn_err( CE_NOTE, "dl_unitdate_req NOMEM 2\n" );
	return( ENOMEM );
    }
    m->b_wptr = m->b_rptr + DL_UNITDATA_REQ_SIZE;
    m->b_datap->db_type = M_PROTO;
    linkb( m, m1 );

    dl = (union DL_primitives *)m->b_rptr;
    dl->dl_primitive = DL_UNITDATA_REQ;
    dl->unitdata_req.dl_dest_addr_length = ETHERADDRL + sizeof ( ushort );
    dl->unitdata_req.dl_dest_addr_offset = m->b_wptr - m->b_rptr;

    bcopy(addr, m->b_wptr, ETHERADDRL );
    m->b_wptr += ETHERADDRL;
    len = 0;
    bcopy( &len, m->b_wptr, sizeof( ushort ));
    m->b_wptr += sizeof( ushort );
    putnext( q, m );
    return( 0 );
}
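
The message handed to putnext() above is a three-block chain: the M_PROTO block carrying the DL_UNITDATA_REQ and destination address, then the M_DATA LLC/SNAP header, then the original payload m0, since linkb() appends its second argument to the tail of the first. A minimal sketch of that core pattern follows; the helper name and arguments are invented for illustration, and msgdsize(9F) counts M_DATA bytes across the whole b_cont chain.

#include <sys/stream.h>
#include <sys/strsun.h>		/* MBLKL, msgdsize */
#include <sys/debug.h>		/* ASSERT */

/* Sketch only: build a two-block M_DATA message with linkb(9F). */
static mblk_t *
make_two_block_msg(const uchar_t *hdr, size_t hlen,
    const uchar_t *body, size_t blen)
{
	mblk_t *h, *b;

	if ((h = allocb(hlen, BPRI_MED)) == NULL)
		return (NULL);
	if ((b = allocb(blen, BPRI_MED)) == NULL) {
		freeb(h);
		return (NULL);
	}
	bcopy(hdr, h->b_wptr, hlen);
	h->b_wptr += hlen;
	bcopy(body, b->b_wptr, blen);
	b->b_wptr += blen;
	linkb(h, b);			/* now h->b_cont == b */
	ASSERT(msgdsize(h) == hlen + blen);
	return (h);
}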
Example #2
/* ARGSUSED */
static mblk_t *
sppp_dladdether(spppstr_t *sps, mblk_t *mp, t_scalar_t proto)
{
	mblk_t		*eh;
	t_scalar_t	type;

	if ((eh = allocb(sizeof (struct ether_header), BPRI_MED)) == NULL) {
		freemsg(mp);
		return (NULL);
	}
	if (proto == PPP_IP) {
		type = ETHERTYPE_IP;
	} else if (proto == PPP_IPV6) {
		type = ETHERTYPE_IPV6;
	} else {
		/*
		 * For all other protocols, send this up as an ETHERTYPE_PPP
		 * type of packet. Since we've skipped the PPP headers in the
		 * caller, make sure that we restore it. We know for sure that
		 * the PPP header still exists in the message (only skipped),
		 * since the sender of this message is pppd and it must have
		 * included the PPP header in front.
		 */
		type = ETHERTYPE_PPP;
		mp->b_rptr -= PPP_HDRLEN;
		ASSERT(mp->b_rptr >= mp->b_datap->db_base);
	}
	eh->b_wptr += sizeof (struct ether_header);
	bzero((caddr_t)eh->b_rptr, sizeof (struct ether_header));
	((struct ether_header *)eh->b_rptr)->ether_type = htons((int16_t)type);

	linkb(eh, mp);
	return (eh);
}
Example #3
/*
 * Return a chain of mblks representing the Multidata packet.
 */
mblk_t *
mmd_transform_link(pdesc_t *pd)
{
	multidata_t *mmd;
	pdescinfo_t *pdi;
	mblk_t *nmp = NULL;

	ASSERT(pd != NULL);
	ASSERT(pd->pd_magic == PDESC_MAGIC);

	mmd = pd->pd_slab->pds_mmd;
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	/* entry has been removed */
	if (pd->pd_flags & PDESC_REM_DEFER)
		return (NULL);

	pdi = &(pd->pd_pdi);

	/* duplicate header buffer */
	if ((pdi->flags & PDESC_HBUF_REF)) {
		if ((nmp = dupb(mmd->mmd_hbuf)) == NULL)
			return (NULL);
		nmp->b_rptr = pdi->hdr_rptr;
		nmp->b_wptr = pdi->hdr_wptr;
	}

	/* duplicate payload buffer(s) */
	if (pdi->flags & PDESC_PBUF_REF) {
		int i;
		mblk_t *mp;
		struct pld_ary_s *pa = &pdi->pld_ary[0];

		mutex_enter(&mmd->mmd_pd_slab_lock);
		for (i = 0; i < pdi->pld_cnt; i++, pa++) {
			ASSERT(mmd->mmd_pbuf[pa->pld_pbuf_idx] != NULL);

			/* skip empty ones */
			if (PDESC_PLD_SPAN_SIZE(pdi, i) == 0)
				continue;

			mp = dupb(mmd->mmd_pbuf[pa->pld_pbuf_idx]);
			if (mp == NULL) {
				if (nmp != NULL)
					freemsg(nmp);
				mutex_exit(&mmd->mmd_pd_slab_lock);
				return (NULL);
			}
			mp->b_rptr = pa->pld_rptr;
			mp->b_wptr = pa->pld_wptr;
			if (nmp == NULL)
				nmp = mp;
			else
				linkb(nmp, mp);
		}
		mutex_exit(&mmd->mmd_pd_slab_lock);
	}

	return (nmp);
}
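
A key detail above is that dupb(9F) does not copy data: the duplicate mblk references the same underlying data block, and resetting its b_rptr/b_wptr merely selects the span recorded in the packet descriptor. A hedged sketch of that zero-copy carve-out, with names invented for illustration:

#include <sys/stream.h>

/*
 * Sketch only: return a zero-copy "window" onto an existing buffer.
 * dupb(9F) shares buf's data block, so the duplicate sees the same
 * bytes; both pointers are assumed to lie within that data block.
 */
static mblk_t *
dup_span(mblk_t *buf, uchar_t *rptr, uchar_t *wptr)
{
	mblk_t *mp;

	if ((mp = dupb(buf)) == NULL)
		return (NULL);
	mp->b_rptr = rptr;
	mp->b_wptr = wptr;
	return (mp);
}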
Example #4
/* ARGSUSED */
mblk_t *
sppp_dladdud(spppstr_t *sps, mblk_t *mp, t_scalar_t proto, boolean_t promisc)
{
	dl_unitdata_ind_t *dlu;
	mblk_t		*dh;
	size_t		size;
	t_scalar_t	type;

	size = sizeof (dl_unitdata_ind_t) + (2 * SPPP_ADDRL);
	if ((dh = allocb(size, BPRI_MED)) == NULL) {
		freemsg(mp);
		return (NULL);
	}

	dh->b_datap->db_type = M_PROTO;
	dh->b_wptr = dh->b_datap->db_lim;
	dh->b_rptr = dh->b_wptr - size;

	dlu = (dl_unitdata_ind_t *)dh->b_rptr;
	dlu->dl_primitive = DL_UNITDATA_IND;
	dlu->dl_dest_addr_length = SPPP_ADDRL;
	dlu->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
	dlu->dl_src_addr_length = SPPP_ADDRL;
	dlu->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + SPPP_ADDRL;
	dlu->dl_group_address = 0;

	if (promisc) {
		if (proto == PPP_IP) {
			type = ETHERTYPE_IP;
		} else if (proto == PPP_IPV6) {
			type = ETHERTYPE_IPV6;
		} else {
			/*
			 * For all other protocols, send this up as an
			 * ETHERTYPE_PPP type of packet. Since we've skipped
			 * the PPP headers in the caller, make sure that we
			 * restore it. We know for sure that the PPP header
			 * still exists in the message (only skipped), since
			 * the sender of this message is pppd and it must
			 * have included the PPP header in front.
			 */
			type = ETHERTYPE_PPP;
			mp->b_rptr -= PPP_HDRLEN;
			ASSERT(mp->b_rptr >= mp->b_datap->db_base);
		}
	} else {
		type = sps->sps_req_sap;
	}
	/*
	 * Send the DLPI client the data with the SAP they requested,
	 * (e.g. ETHERTYPE_IP) rather than the PPP protocol (e.g. PPP_IP).
	 */
	((spppreqsap_t *)(dlu + 1))[0] = type;
	((spppreqsap_t *)(dlu + 1))[1] = type;

	linkb(dh, mp);
	return (dh);
}
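
One idiom worth calling out: rather than the usual left-justified dh->b_wptr = dh->b_rptr + size, the code right-justifies the DL_UNITDATA_IND against db_lim, so whatever extra space allocb(9F) rounded the buffer up to becomes headroom in front of b_rptr instead of slack after b_wptr. A minimal sketch of just the allocation step (hypothetical helper):

#include <sys/stream.h>

/*
 * Sketch only: allocate an M_PROTO block with "size" bytes of header
 * right-justified in the data block, as sppp_dladdud() does above.
 */
static mblk_t *
alloc_right_justified(size_t size)
{
	mblk_t *dh;

	if ((dh = allocb(size, BPRI_MED)) == NULL)
		return (NULL);
	dh->b_datap->db_type = M_PROTO;
	dh->b_wptr = dh->b_datap->db_lim;	/* end of the data block */
	dh->b_rptr = dh->b_wptr - size;		/* headroom stays in front */
	return (dh);
}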
Example #5
/*
 * Called from sctp_input_data() to add one error chunk to the error
 * chunks list.  The error chunks list will be processed at the end
 * of sctp_input_data() by calling sctp_process_err().
 */
void
sctp_add_err(sctp_t *sctp, uint16_t serror, void *details, size_t len,
    sctp_faddr_t *dest)
{
	sctp_stack_t *sctps = sctp->sctp_sctps;
	mblk_t *emp;
	uint32_t emp_len;
	uint32_t mss;
	mblk_t *sendmp;
	sctp_faddr_t *fp;

	emp = sctp_make_err(sctp, serror, details, len);
	if (emp == NULL)
		return;
	emp_len = MBLKL(emp);
	if (sctp->sctp_err_chunks != NULL) {
		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
	} else {
		fp = dest;
		SCTP_SET_CHUNK_DEST(emp, dest);
	}
	mss = fp->sf_pmss;

	/*
	 * If the current output packet cannot include the new error chunk,
	 * send out the current packet and then add the new error chunk
	 * to the new output packet.
	 */
	if (sctp->sctp_err_len + emp_len > mss) {
		if ((sendmp = sctp_make_mp(sctp, fp, 0)) == NULL) {
			SCTP_KSTAT(sctps, sctp_send_err_failed);
			/* Just free the latest error chunk. */
			freeb(emp);
			return;
		}
		sendmp->b_cont = sctp->sctp_err_chunks;
		sctp_set_iplen(sctp, sendmp, fp->sf_ixa);
		(void) conn_ip_output(sendmp, fp->sf_ixa);
		BUMP_LOCAL(sctp->sctp_opkts);

		sctp->sctp_err_chunks = emp;
		sctp->sctp_err_len = emp_len;
		SCTP_SET_CHUNK_DEST(emp, dest);
	} else {
		if (sctp->sctp_err_chunks != NULL)
			linkb(sctp->sctp_err_chunks, emp);
		else
			sctp->sctp_err_chunks = emp;
		sctp->sctp_err_len += emp_len;
	}
	/* Assume that we will send it out... */
	BUMP_LOCAL(sctp->sctp_obchunks);
}
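
The branch structure above is the standard accumulate-or-flush bundling pattern: error chunks are linkb()-chained onto one pending packet until the next chunk would push it past the path MSS, at which point the pending packet is transmitted and the new chunk starts a fresh one. Stripped of the SCTP specifics, a hypothetical sketch:

#include <sys/stream.h>
#include <sys/strsun.h>		/* MBLKL */

/*
 * Sketch only: append "chunk" to *pendingp unless that would exceed
 * "mss".  Returns a chain the caller should transmit now, or NULL if
 * the chunk was simply bundled.
 */
static mblk_t *
bundle_or_flush(mblk_t **pendingp, size_t *plenp, mblk_t *chunk, size_t mss)
{
	size_t clen = MBLKL(chunk);
	mblk_t *tosend = NULL;

	if (*pendingp != NULL && *plenp + clen > mss) {
		tosend = *pendingp;		/* flush the full packet */
		*pendingp = chunk;		/* start a new one */
		*plenp = clen;
	} else if (*pendingp != NULL) {
		linkb(*pendingp, chunk);	/* bundle into the same packet */
		*plenp += clen;
	} else {
		*pendingp = chunk;
		*plenp = clen;
	}
	return (tosend);
}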
Example #6
/*
 * got_errchunk is set B_TRUE only if called from validate_init_params(), when
 * an ERROR chunk has already been prepended and its size needs updating for
 * additional unrecognized parameters. Other callers either prepend the ERROR
 * chunk with the correct size after calling this function, or they are calling
 * to add an invalid parameter to an INIT_ACK chunk; in that case no ERROR
 * chunk exists and the CAUSE blocks go into the INIT_ACK directly.
 *
 * *errmp will be non-NULL both when adding an additional CAUSE block to an
 * existing prepended COOKIE ERROR chunk (processing params of an INIT_ACK),
 * and when adding unrecognized parameters after the first, to an INIT_ACK
 * (processing params of an INIT chunk).
 */
void
sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp,
    boolean_t got_errchunk)
{
	mblk_t *mp;
	sctp_parm_hdr_t *ph;
	size_t len;
	int pad;
	sctp_chunk_hdr_t *ecp;

	len = sizeof (*ph) + ntohs(uph->sph_len);
	if ((pad = len % SCTP_ALIGN) != 0) {
		pad = SCTP_ALIGN - pad;
		len += pad;
	}
	mp = allocb(len, BPRI_MED);
	if (mp == NULL) {
		return;
	}

	ph = (sctp_parm_hdr_t *)(mp->b_rptr);
	ph->sph_type = htons(PARM_UNRECOGNIZED);
	ph->sph_len = htons(len - pad);

	/* copy in the unrecognized parameter */
	bcopy(uph, ph + 1, ntohs(uph->sph_len));

	if (pad != 0)
		bzero((mp->b_rptr + len - pad), pad);

	mp->b_wptr = mp->b_rptr + len;
	if (*errmp != NULL) {
		/*
		 * Update total length if an ERROR chunk, then link
		 * this CAUSE block to the possible chain of CAUSE
		 * blocks attached to the ERROR chunk or INIT_ACK
		 * being created.
		 */
		if (got_errchunk) {
			/* ERROR chunk already prepended */
			ecp = (sctp_chunk_hdr_t *)((*errmp)->b_rptr);
			ecp->sch_len = htons(ntohs(ecp->sch_len) + len);
		}
		linkb(*errmp, mp);
	} else {
		*errmp = mp;
	}
}
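
The pad computation is the usual round-up-to-alignment idiom: len grows to the next multiple of SCTP_ALIGN while sph_len keeps the unpadded length and the pad bytes are zeroed. For power-of-two alignments illumos also offers P2ROUNDUP in <sys/sysmacros.h>; a hedged equivalent of the arithmetic above:

#include <sys/sysmacros.h>	/* P2ROUNDUP */

/*
 * Sketch only: pad bytes needed to bring "len" up to the next
 * multiple of SCTP_ALIGN (assumed visible from the SCTP headers
 * and a power of two; it is 4 for SCTP parameters).
 */
static size_t
pad_for(size_t len)
{
	return (P2ROUNDUP(len, SCTP_ALIGN) - len);
}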
Example #7
static int
sctp_asconf_add(sctp_asconf_t *asc, mblk_t *mp)
{
	uint32_t *cp;

	/* XXX can't exceed MTU */

	cp = (uint32_t *)(mp->b_rptr + sizeof (sctp_parm_hdr_t));
	*cp = asc->cid++;

	if (asc->head == NULL)
		asc->head = mp;
	else
		linkb(asc->head, mp);

	return (0);
}
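
Note that linkb(asc->head, mp) walks the entire b_cont chain to find the tail on every call, so building a long parameter list this way is quadratic; functionally it is the loop below (a tail-pointer variant for bulk appends appears after Example #16). A sketch of what linkb(9F) amounts to:

#include <sys/stream.h>

/*
 * Sketch only: append "mp" as the last b_cont block of the chain
 * headed by "head" -- the same effect as linkb(head, mp).
 */
static void
append_block(mblk_t *head, mblk_t *mp)
{
	mblk_t *t;

	for (t = head; t->b_cont != NULL; t = t->b_cont)
		;
	t->b_cont = mp;
}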
Example #8
ssize_t
sctp_link_abort(mblk_t *mp, uint16_t serror, char *details, size_t len,
    int iserror, boolean_t tbit)
{
	size_t alen;
	mblk_t *amp;
	sctp_chunk_hdr_t *acp;
	sctp_parm_hdr_t *eph;

	ASSERT(mp != NULL && mp->b_cont == NULL);

	alen = sizeof (*acp) + (serror != 0 ? (sizeof (*eph) + len) : 0);

	amp = allocb(alen, BPRI_MED);
	if (amp == NULL) {
		return (-1);
	}

	amp->b_wptr = amp->b_rptr + alen;

	/* Chunk header */
	acp = (sctp_chunk_hdr_t *)amp->b_rptr;
	acp->sch_id = iserror ? CHUNK_ERROR : CHUNK_ABORT;
	acp->sch_flags = 0;
	acp->sch_len = htons(alen);
	if (tbit)
		SCTP_SET_TBIT(acp);

	linkb(mp, amp);

	if (serror == 0) {
		return (alen);
	}

	eph = (sctp_parm_hdr_t *)(acp + 1);
	eph->sph_type = htons(serror);
	eph->sph_len = htons(len + sizeof (*eph));

	if (len > 0) {
		bcopy(details, eph + 1, len);
	}

	/* XXX pad */

	return (alen);
}
Example #9
/*
 * Return duplicate message block(s) of the associated buffer(s).
 */
int
mmd_dupbufs(multidata_t *mmd, mblk_t **hmp, mblk_t **pmp)
{
	ASSERT(mmd != NULL);
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	if (hmp != NULL) {
		*hmp = NULL;
		if (mmd->mmd_hbuf != NULL &&
		    (*hmp = dupb(mmd->mmd_hbuf)) == NULL)
			return (-1);
	}

	if (pmp != NULL) {
		int i;
		mblk_t *mp;

		mutex_enter(&mmd->mmd_pd_slab_lock);
		*pmp = NULL;
		for (i = 0; i < mmd->mmd_pbuf_cnt; i++) {
			ASSERT(mmd->mmd_pbuf[i] != NULL);
			mp = dupb(mmd->mmd_pbuf[i]);
			if (mp == NULL) {
				if (hmp != NULL && *hmp != NULL)
					freeb(*hmp);
				if (*pmp != NULL)
					freemsg(*pmp);
				mutex_exit(&mmd->mmd_pd_slab_lock);
				return (-1);
			}
			if (*pmp == NULL)
				*pmp = mp;
			else
				linkb(*pmp, mp);
		}
		mutex_exit(&mmd->mmd_pd_slab_lock);
	}

	return (0);
}
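
The failure path shows the all-or-nothing unwind that linkb() chains make cheap: every duplicate made so far already hangs off *pmp, so a single freemsg(9F) releases them all, while the separately returned header duplicate needs its own freeb(9F). A generic sketch of the pattern (hypothetical helper):

#include <sys/stream.h>

/*
 * Sketch only: duplicate an array of buffers into one chain, or
 * clean up everything on the first failure.  freemsg(9F) accepts
 * NULL and otherwise frees the whole b_cont chain.
 */
static mblk_t *
dup_all(mblk_t **bufs, int n)
{
	mblk_t *chain = NULL, *mp;
	int i;

	for (i = 0; i < n; i++) {
		if ((mp = dupb(bufs[i])) == NULL) {
			freemsg(chain);
			return (NULL);
		}
		if (chain == NULL)
			chain = mp;
		else
			linkb(chain, mp);
	}
	return (chain);
}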
Example #10
/* ARGSUSED */
static int
sdpfp_senduio(sock_lower_handle_t handle, struct uio *uiop,
    struct nmsghdr *msg, struct cred *cred)
{
	struct sockaddr_ll *sol;
	mac_client_handle_t mch;
	struct pfpsock *ps;
	boolean_t new_open;
	mac_handle_t mh;
	size_t mpsize;
	uint_t maxsdu;
	mblk_t *mp0;
	mblk_t *mp;
	int error;

	mp = NULL;
	mp0 = NULL;
	new_open = B_FALSE;
	ps = (struct pfpsock *)handle;
	mh = ps->ps_mh;
	mch = ps->ps_mch;
	maxsdu = ps->ps_max_sdu;

	sol = (struct sockaddr_ll *)msg->msg_name;
	if (sol == NULL) {
		/*
		 * If no sockaddr_ll has been provided with the send call,
		 * use the one constructed when the socket was bound to an
		 * interface and fail if it hasn't been bound.
		 */
		if (!ps->ps_bound) {
			ks_stats.kp_send_unbound.value.ui64++;
			return (EPROTO);
		}
		sol = (struct sockaddr_ll *)&ps->ps_sock;
	} else {
		/*
		 * Verify the sockaddr_ll message passed down before using
		 * it to send a packet out with. If it refers to an interface
		 * that has not been bound, it is necessary to open it.
		 */
		struct sockaddr_ll *sll;

		if (msg->msg_namelen < sizeof (struct sockaddr_ll)) {
			ks_stats.kp_send_short_msg.value.ui64++;
			return (EINVAL);
		}

		if (sol->sll_family != AF_PACKET) {
			ks_stats.kp_send_wrong_family.value.ui64++;
			return (EAFNOSUPPORT);
		}

		sll = (struct sockaddr_ll *)&ps->ps_sock;
		if (sol->sll_ifindex != sll->sll_ifindex) {
			error = pfp_open_index(sol->sll_ifindex, &mh, &mch,
			    cred);
			if (error != 0) {
				ks_stats.kp_send_open_fail.value.ui64++;
				return (error);
			}
			mac_sdu_get(mh, NULL, &maxsdu);
			new_open = B_TRUE;
		}
	}

	mpsize = uiop->uio_resid;
	if (mpsize > maxsdu) {
		ks_stats.kp_send_too_big.value.ui64++;
		error = EMSGSIZE;
		goto done;
	}

	if ((mp = allocb(mpsize, BPRI_HI)) == NULL) {
		ks_stats.kp_send_alloc_fail.value.ui64++;
		error = ENOBUFS;
		goto done;
	}

	mp->b_wptr = mp->b_rptr + mpsize;
	error = uiomove(mp->b_rptr, mpsize, UIO_WRITE, uiop);
	if (error != 0) {
		ks_stats.kp_send_uiomove_fail.value.ui64++;
		goto done;
	}

	if (ps->ps_type == SOCK_DGRAM) {
		mp0 = mac_header(mh, sol->sll_addr, sol->sll_protocol, mp, 0);
		if (mp0 == NULL) {
			ks_stats.kp_send_no_memory.value.ui64++;
			error = ENOBUFS;
			goto done;
		}
		linkb(mp0, mp);
		mp = mp0;
	}

	/*
	 * As this is sending datagrams and no promise is made about
	 * how or if a packet will be sent/delivered, no effort is to
	 * be expended in recovering from a situation where the packet
	 * cannot be sent - it is just dropped.
	 */
	error = mac_tx(mch, mp, 0, MAC_DROP_ON_NO_DESC, NULL);
	if (error == 0) {
		mp = NULL;
		ks_stats.kp_send_ok.value.ui64++;
	} else {
		ks_stats.kp_send_failed.value.ui64++;
	}

done:

	if (new_open) {
		ASSERT(mch != ps->ps_mch);
		ASSERT(mh != ps->ps_mh);
		pfp_close(mh, mch);
	}
	if (mp != NULL)
		freemsg(mp);

	return (error);
}
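
The copy-in above follows the standard recipe for moving user data into a message block: size the mblk from uio_resid, advance b_wptr to claim the space, and let uiomove(9F) fill it. Reduced to its essentials (hypothetical helper, without the MAC header and transmit plumbing):

#include <sys/stream.h>
#include <sys/uio.h>
#include <sys/errno.h>

/*
 * Sketch only: copy the remaining bytes of a uio into one freshly
 * allocated mblk.  Returns 0 with the block in *mpp, or an errno.
 */
static int
uio_to_mblk(struct uio *uiop, mblk_t **mpp)
{
	size_t size = uiop->uio_resid;
	mblk_t *mp;
	int error;

	if ((mp = allocb(size, BPRI_HI)) == NULL)
		return (ENOBUFS);
	mp->b_wptr = mp->b_rptr + size;
	error = uiomove(mp->b_rptr, size, UIO_WRITE, uiop);
	if (error != 0) {
		freeb(mp);
		return (error);
	}
	*mpp = mp;
	return (0);
}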
Example #11
/* ARGSUSED */
static void
pfp_packet(void *arg, mac_resource_handle_t mrh, mblk_t *mp, boolean_t flag)
{
	struct T_unitdata_ind *tunit;
	struct sockaddr_ll *sll;
	struct sockaddr_ll *sol;
	mac_header_info_t hdr;
	struct pfpsock *ps;
	size_t tusz;
	mblk_t *mp0;
	int error;

	if (mp == NULL)
		return;

	ps = arg;
	if (ps->ps_flow_ctrld) {
		ps->ps_flow_ctrl_drops++;
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_flow_cntrld.value.ui64++;
		freemsg(mp);
		return;
	}

	if (mac_header_info(ps->ps_mh, mp, &hdr) != 0) {
		/*
		 * Can't decode the packet header information so drop it.
		 */
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_mac_hdr_fail.value.ui64++;
		freemsg(mp);
		return;
	}

	if (mac_type(ps->ps_mh) == DL_ETHER &&
	    hdr.mhi_bindsap == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evhp;
		struct ether_vlan_header evh;

		hdr.mhi_hdrsize = sizeof (struct ether_vlan_header);
		hdr.mhi_istagged = B_TRUE;

		if (MBLKL(mp) >= sizeof (*evhp)) {
			evhp = (struct ether_vlan_header *)mp->b_rptr;
		} else {
			int sz = sizeof (*evhp);
			char *s = (char *)&evh;
			mblk_t *tmp;
			int len;

			for (tmp = mp; sz > 0 && tmp != NULL;
			    tmp = tmp->b_cont) {
				len = min(sz, MBLKL(tmp));
				bcopy(tmp->b_rptr, s, len);
				sz -= len;
				s += len;	/* advance the copy cursor */
			}
			evhp = &evh;
		}
		hdr.mhi_tci = ntohs(evhp->ether_tci);
		hdr.mhi_bindsap = ntohs(evhp->ether_type);
	}

	if ((ps->ps_proto != 0) && (ps->ps_proto != hdr.mhi_bindsap)) {
		/*
		 * The packet is not of interest to this socket so
		 * drop it on the floor. Here the SAP is being used
		 * as a very coarse filter.
		 */
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_bad_proto.value.ui64++;
		freemsg(mp);
		return;
	}

	/*
	 * This field is not often set, even for ethernet,
	 * by mac_header_info, so compute it if it is 0.
	 */
	if (hdr.mhi_pktsize == 0)
		hdr.mhi_pktsize = msgdsize(mp);

	/*
	 * If a BPF filter is present, pass the raw packet into that.
	 * A failed match will result in zero being returned, indicating
	 * that this socket is not interested in the packet.
	 */
	if (ps->ps_bpf.bf_len != 0) {
		uchar_t *buffer;
		int buflen;

		buflen = MBLKL(mp);
		if (hdr.mhi_pktsize == buflen) {
			buffer = mp->b_rptr;
		} else {
			buflen = 0;
			buffer = (uchar_t *)mp;
		}
		rw_enter(&ps->ps_bpflock, RW_READER);
		if (bpf_filter(ps->ps_bpf.bf_insns, buffer,
		    hdr.mhi_pktsize, buflen) == 0) {
			rw_exit(&ps->ps_bpflock);
			ps->ps_stats.tp_drops++;
			ks_stats.kp_recv_filtered.value.ui64++;
			freemsg(mp);
			return;
		}
		rw_exit(&ps->ps_bpflock);
	}

	if (ps->ps_type == SOCK_DGRAM) {
		/*
		 * SOCK_DGRAM sockets expect a "layer 3" packet, so advance
		 * past the link layer header.
		 */
		mp->b_rptr += hdr.mhi_hdrsize;
		hdr.mhi_pktsize -= hdr.mhi_hdrsize;
	}

	tusz = sizeof (struct T_unitdata_ind) + sizeof (struct sockaddr_ll);
	if (ps->ps_auxdata) {
		tusz += _TPI_ALIGN_TOPT(sizeof (struct tpacket_auxdata));
		tusz += _TPI_ALIGN_TOPT(sizeof (struct T_opthdr));
	}

	/*
	 * It is tempting to think that this could be optimised by having
	 * the base mblk_t allocated and hung off the pfpsock structure,
	 * except that then another one would need to be allocated for the
	 * sockaddr_ll that is included. Even creating a template to copy
	 * from is of questionable value, as read-write from one structure
	 * to the other is going to be slower than all of the initialisation.
	 */
	mp0 = allocb(tusz, BPRI_HI);
	if (mp0 == NULL) {
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_alloc_fail.value.ui64++;
		freemsg(mp);
		return;
	}

	(void) memset(mp0->b_rptr, 0, tusz);

	mp0->b_datap->db_type = M_PROTO;
	mp0->b_wptr = mp0->b_rptr + tusz;

	tunit = (struct T_unitdata_ind *)mp0->b_rptr;
	tunit->PRIM_type = T_UNITDATA_IND;
	tunit->SRC_length = sizeof (struct sockaddr);
	tunit->SRC_offset = sizeof (*tunit);

	sol = (struct sockaddr_ll *)&ps->ps_sock;
	sll = (struct sockaddr_ll *)(mp0->b_rptr + sizeof (*tunit));
	sll->sll_ifindex = sol->sll_ifindex;
	sll->sll_hatype = (uint16_t)hdr.mhi_origsap;
	sll->sll_halen = sol->sll_halen;
	if (hdr.mhi_saddr != NULL)
		(void) memcpy(sll->sll_addr, hdr.mhi_saddr, sll->sll_halen);

	switch (hdr.mhi_dsttype) {
	case MAC_ADDRTYPE_MULTICAST :
		sll->sll_pkttype = PACKET_MULTICAST;
		break;
	case MAC_ADDRTYPE_BROADCAST :
		sll->sll_pkttype = PACKET_BROADCAST;
		break;
	case MAC_ADDRTYPE_UNICAST :
		if (memcmp(sol->sll_addr, hdr.mhi_daddr, sol->sll_halen) == 0)
			sll->sll_pkttype = PACKET_HOST;
		else
			sll->sll_pkttype = PACKET_OTHERHOST;
		break;
	}

	if (ps->ps_auxdata) {
		struct tpacket_auxdata *aux;
		struct T_opthdr *topt;

		tunit->OPT_offset = _TPI_ALIGN_TOPT(tunit->SRC_offset +
		    sizeof (struct sockaddr_ll));
		tunit->OPT_length = _TPI_ALIGN_TOPT(sizeof (struct T_opthdr)) +
		    _TPI_ALIGN_TOPT(sizeof (struct tpacket_auxdata));

		topt = (struct T_opthdr *)(mp0->b_rptr + tunit->OPT_offset);
		aux = (struct tpacket_auxdata *)
		    ((char *)topt + _TPI_ALIGN_TOPT(sizeof (*topt)));

		topt->len = tunit->OPT_length;
		topt->level = SOL_PACKET;
		topt->name = PACKET_AUXDATA;
		topt->status = 0;
		/*
		 * libpcap doesn't seem to use any of the other fields,
		 * so it isn't clear how they should be filled in.
		 */
		aux->tp_vlan_vci = hdr.mhi_tci;
	}

	linkb(mp0, mp);

	ps->ps_upcalls->su_recv(ps->ps_upper, mp0, hdr.mhi_pktsize, 0,
	    &error, NULL);

	if (error == 0) {
		ps->ps_stats.tp_packets++;
		ks_stats.kp_recv_ok.value.ui64++;
	} else {
		mutex_enter(&ps->ps_lock);
		if (error == ENOSPC) {
			ps->ps_upcalls->su_recv(ps->ps_upper, NULL, 0, 0,
			    &error, NULL);
			if (error == ENOSPC)
				ps->ps_flow_ctrld = B_TRUE;
		}
		mutex_exit(&ps->ps_lock);
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_fail.value.ui64++;
	}
}
Example #12
/*
 * Transmit a packet (low level interface)
 *
 * This routine is called from ip2xinet_tx. That function
 * grabbed the driver lock when it was called.
 */
void
ip2xinet_hw_tx(unsigned char *buf, int len, struct ip2xinet_dev *dev)
{
	/* This function deals with hw details, while all other procedures are rather
	   device-independent */
	struct iphdr *ih, *iph;
	struct ethhdr *eth;
	struct ip2xinet_priv *privp;
	queue_t *q;
	mblk_t *mp, *nmp;
	dl_unitdata_req_t *req;
	int mylen;

	/* sanity check */
	if (len < sizeof(struct ethhdr) + sizeof(struct iphdr)) {
		printk("ip2xinet: Hmm... packet too short (%i octets)\n", len);
		return;
	}
	/* Ethhdr is 14 bytes, but the kernel arranges for iphdr to be aligned (i.e., ethhdr is
	   unaligned) */
	ih = (struct iphdr *) (buf + sizeof(struct ethhdr));

	/* Ok, now the packet is ready for transmission: */

	/* Here we do a putq to the bottom q.  */

	privp = &dev->priv;
	q = ip2xinet_status.lowerq;

	/* THIS IS WHERE WE ALLOCATE UNITDATA_REQ and send data down */
	if ((mp = allocb(sizeof(struct iphdr) + DL_UNITDATA_REQ_SIZE, BPRI_LO)) == NULL) {
		printk("ip2xhwtx: failed: allocb failed");
		return;
	}
	mp->b_datap->db_type = M_PROTO;
	mp->b_wptr += (sizeof(struct iphdr) + DL_UNITDATA_REQ_SIZE);

	/* xinet expects a DLPI header ahead of the datagram, as in Unix. The destination address
	   in this header needs to be the next hop address.  We're going to get this from the
	   destination address in the Ethernet header and rely upon froute/x25route having added a
	   static ARP entry with:

	   IP address of machine at other end of circuit

	   MAC address equal to IP address of that same machine

	   Though the IP address of IP datagrams passed down to us may be many hops away, the
	   destination Ethernet address will always be the next hop IP address. */
	eth = (struct ethhdr *) (buf);
	iph = (struct iphdr *) (mp->b_rptr + DL_UNITDATA_REQ_SIZE);
	iph->saddr = ih->saddr;	/* likely unused by xinet */
	iph->daddr = (eth->h_dest[3] << 24)	/* next hop address */
	    +(eth->h_dest[2] << 16)
	    + (eth->h_dest[1] << 8)
	    + (eth->h_dest[0]);
	iph->check = 0;

	req = (dl_unitdata_req_t *) mp->b_rptr;
	req->dl_primitive = DL_UNITDATA_REQ;
	req->dl_dest_addr_length = 4;
	req->dl_dest_addr_offset = DL_UNITDATA_REQ_SIZE + (long) &((struct iphdr *) 0)->daddr;

	/* Copy from buf to mp, make everything right, then send the stuff to xinet IF WE CAN.
	   Could we use esballoc here? */

	mylen = len - sizeof(struct ethhdr);
	if ((nmp = allocb(mylen, BPRI_LO)) == NULL) {
		printk("ip2xhwtx: failed: allocb failed");
		freemsg(mp);
		return;
	}
	linkb(mp, nmp);
	bcopy(buf + sizeof(struct ethhdr), nmp->b_rptr, mylen);
	nmp->b_wptr += mylen;
	if (!putq(q, mp)) {
		mp->b_band = 0;
		putq(q, mp);
	}
	privp->stats.tx_packets++;
}
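
The expression (long) &((struct iphdr *) 0)->daddr above is the traditional hand-rolled form of offsetof. Assuming the build environment provides offsetof (<stddef.h> in userland; the illumos kernel defines it in <sys/sysmacros.h>), the same offset reads more plainly; a hypothetical equivalent, with the macro name invented here:

#include <stddef.h>	/* offsetof */

/*
 * Sketch only: (long) &((struct iphdr *) 0)->daddr is equivalent to
 * offsetof(struct iphdr, daddr), assuming the headers defining
 * DL_UNITDATA_REQ_SIZE and struct iphdr are in scope.
 */
#define	IP2X_DADDR_OFFSET \
	(DL_UNITDATA_REQ_SIZE + offsetof(struct iphdr, daddr))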
Example #13
void
sctp_input_asconf(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
{
	const dispatch_t	*dp;
	mblk_t			*hmp;
	mblk_t			*mp;
	uint32_t		*idp;
	uint32_t		*hidp;
	ssize_t			rlen;
	sctp_parm_hdr_t		*ph;
	sctp_chunk_hdr_t	*ach;
	int			cont;
	int			act;
	uint16_t		plen;
	uchar_t			*alist = NULL;
	size_t			asize = 0;
	uchar_t			*dlist = NULL;
	size_t			dsize = 0;
	uchar_t			*aptr = NULL;
	uchar_t			*dptr = NULL;
	int			acount = 0;
	int			dcount = 0;
	sctp_stack_t		*sctps = sctp->sctp_sctps;

	ASSERT(ch->sch_id == CHUNK_ASCONF);

	idp = (uint32_t *)(ch + 1);
	rlen = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*idp);

	if (rlen < 0 || rlen < sizeof (*idp)) {
		/* nothing there; bail out */
		return;
	}

	/* Check for duplicates */
	*idp = ntohl(*idp);
	if (*idp == (sctp->sctp_fcsn + 1)) {
		act = 1;
	} else if (*idp == sctp->sctp_fcsn) {
		act = 0;
	} else {
		/* stale or malicious packet; drop */
		return;
	}

	/* Create the ASCONF_ACK header */
	hmp = sctp_make_mp(sctp, fp, sizeof (*ach) + sizeof (*idp));
	if (hmp == NULL) {
		/* Let the peer retransmit */
		SCTP_KSTAT(sctps, sctp_send_asconf_ack_failed);
		return;
	}
	ach = (sctp_chunk_hdr_t *)hmp->b_wptr;
	ach->sch_id = CHUNK_ASCONF_ACK;
	ach->sch_flags = 0;
	/* Set the length later */
	hidp = (uint32_t *)(ach + 1);
	*hidp = htonl(*idp);
	hmp->b_wptr = (uchar_t *)(hidp + 1);

	/* Move to the Address Parameter */
	ph = (sctp_parm_hdr_t *)(idp + 1);
	if (rlen <= ntohs(ph->sph_len)) {
		freeb(hmp);
		return;
	}

	/*
	 * We already have the association here, so this address parameter
	 * doesn't seem to be very useful, should we make sure this is part
	 * of the association and send an error, if not?
	 * Ignore it for now.
	 */
	rlen -= ntohs(ph->sph_len);
	ph = (sctp_parm_hdr_t *)((char *)ph + ntohs(ph->sph_len));

	/*
	 * We need to pre-allocate buffer before processing the ASCONF
	 * chunk. We don't want to fail allocating buffers after processing
	 * the ASCONF chunk. So, we walk the list and get the number of
	 * addresses added and/or deleted.
	 */
	if (cl_sctp_assoc_change != NULL) {
		sctp_parm_hdr_t	*oph = ph;
		ssize_t		orlen = rlen;

		/*
		 * This is not very efficient, but there is no better way of
		 * doing it.  It should be fine since normally the param list
		 * will not be very long.
		 */
		while (orlen > 0) {
			/* Sanity checks */
			if (orlen < sizeof (*oph))
				break;
			plen = ntohs(oph->sph_len);
			if (plen < sizeof (*oph) || plen > orlen)
				break;
			if (oph->sph_type == htons(PARM_ADD_IP))
				acount++;
			if (oph->sph_type == htons(PARM_DEL_IP))
				dcount++;
			oph = sctp_next_parm(oph, &orlen);
			if (oph == NULL)
				break;
		}
		if (acount > 0 || dcount > 0) {
			if (acount > 0) {
				asize = sizeof (in6_addr_t) * acount;
				alist = kmem_alloc(asize, KM_NOSLEEP);
				if (alist == NULL) {
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			if (dcount > 0) {
				dsize = sizeof (in6_addr_t) * dcount;
				dlist = kmem_alloc(dsize, KM_NOSLEEP);
				if (dlist == NULL) {
					if (acount > 0)
						kmem_free(alist, asize);
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			aptr = alist;
			dptr = dlist;
			/*
			 * We will get the actual count when we process
			 * the chunk.
			 */
			acount = 0;
			dcount = 0;
		}
	}
	cont = 1;
	while (rlen > 0 && cont) {
		in6_addr_t	addr;

		/* Sanity checks */
		if (rlen < sizeof (*ph))
			break;
		plen = ntohs(ph->sph_len);
		if (plen < sizeof (*ph) || plen > rlen) {
			break;
		}
		idp = (uint32_t *)(ph + 1);
		dp = sctp_lookup_asconf_dispatch(ntohs(ph->sph_type));
		ASSERT(dp);
		if (dp->asconf) {
			mp = dp->asconf(sctp, ph, *idp, fp, &cont, act, &addr);
			if (cont == -1) {
				/*
				 * Not even enough memory to create
				 * an out-of-resources error. Free
				 * everything and return; the peer
				 * should retransmit.
				 */
				freemsg(hmp);
				if (alist != NULL)
					kmem_free(alist, asize);
				if (dlist != NULL)
					kmem_free(dlist, dsize);
				return;
			}
			if (mp != NULL) {
				linkb(hmp, mp);
			} else if (act != 0) {
				/* update the add/delete list */
				if (cl_sctp_assoc_change != NULL) {
					if (ph->sph_type ==
					    htons(PARM_ADD_IP)) {
						ASSERT(alist != NULL);
						bcopy(&addr, aptr,
						    sizeof (addr));
						aptr += sizeof (addr);
						acount++;
					} else if (ph->sph_type ==
					    htons(PARM_DEL_IP)) {
						ASSERT(dlist != NULL);
						bcopy(&addr, dptr,
						    sizeof (addr));
						dptr += sizeof (addr);
						dcount++;
					}
				}
			}
		}
		ph = sctp_next_parm(ph, &rlen);
		if (ph == NULL)
			break;
	}

	/*
	 * Update clustering's state for this assoc. Note acount/dcount
	 * could be zero (i.e. if the add/delete address(es) were not
	 * processed successfully). Regardless, if the ?size is > 0,
	 * it is the clustering module's responsibility to free the lists.
	 */
	if (cl_sctp_assoc_change != NULL) {
		(*cl_sctp_assoc_change)(sctp->sctp_connp->conn_family,
		    alist, asize,
		    acount, dlist, dsize, dcount, SCTP_CL_PADDR,
		    (cl_sctp_handle_t)sctp);
		/* alist and dlist will be freed by the clustering module */
	}
	/* Now that the params have been processed, increment the fcsn */
	if (act) {
		sctp->sctp_fcsn++;
	}
	BUMP_LOCAL(sctp->sctp_obchunks);

	if (fp->isv4)
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr_len);
	else
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr6_len);

	sctp_set_iplen(sctp, hmp, fp->ixa);
	(void) conn_ip_output(hmp, fp->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	sctp_validate_peer(sctp);
}
Example #14
static int
sctp_asconf_send(sctp_t *sctp, sctp_asconf_t *asc, sctp_faddr_t *fp,
    sctp_cl_ainfo_t *ainfo)
{
	mblk_t			*mp, *nmp;
	sctp_chunk_hdr_t	*ch;
	boolean_t		isv4;
	size_t			msgsize;

	ASSERT(asc != NULL && asc->head != NULL);

	isv4 = (fp != NULL) ? fp->isv4 : sctp->sctp_current->isv4;

	/* SCTP chunk header + Serial Number + Address Param TLV */
	msgsize = sizeof (*ch) + sizeof (uint32_t) +
	    (isv4 ? PARM_ADDR4_LEN : PARM_ADDR6_LEN);

	mp = allocb(msgsize, BPRI_MED);
	if (mp == NULL)
		return (ENOMEM);

	mp->b_wptr += msgsize;
	mp->b_cont = asc->head;

	ch = (sctp_chunk_hdr_t *)mp->b_rptr;
	ch->sch_id = CHUNK_ASCONF;
	ch->sch_flags = 0;
	ch->sch_len = htons(msgdsize(mp));

	nmp = msgpullup(mp, -1);
	if (nmp == NULL) {
		freeb(mp);
		return (ENOMEM);
	}

	/*
	 * Stash the address list and the count so that when the operation
	 * completes, i.e. when we get an ACK, we can update the clustering's
	 * state for this association.
	 */
	if (ainfo != NULL) {
		ASSERT(cl_sctp_assoc_change != NULL);
		ASSERT(nmp->b_prev == NULL);
		nmp->b_prev = (mblk_t *)ainfo;
	}
	/* Clean up the temporary mblk chain */
	freemsg(mp);
	asc->head = NULL;
	asc->cid = 0;

	/* Queue it ... */
	if (sctp->sctp_cxmit_list == NULL) {
		sctp->sctp_cxmit_list = nmp;
	} else {
		linkb(sctp->sctp_cxmit_list, nmp);
	}

	BUMP_LOCAL(sctp->sctp_obchunks);

	/* And try to send it. */
	sctp_wput_asconf(sctp, fp);

	return (0);
}
Example #15
/*
 * sppp_dlunitdatareq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Handle DL_UNITDATA_REQ request, called by sppp_mproto. This procedure
 *    gets called for M_PROTO (DLPI) style of transmission. The fact that we
 *    have acknowledged IP's fastpath probing (DL_IOC_HDR_INFO) does not
 *    guarantee that IP will always transmit via M_DATA, and it merely implies
 *    that such situation _may_ happen. In other words, IP may decide to use
 *    M_PROTO (DLPI) for data transmission should it decide to do so.
 *    Therefore, we should never place any restrictions or checks against
 *    streams marked with SPS_FASTPATH, since it is legal for this procedure
 *    to be entered with or without the bit set.
 */
static int
sppp_dlunitdatareq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	sppa_t		*ppa;
	mblk_t		*hdrmp;
	mblk_t		*pktmp;
	dl_unitdata_req_t *dludp;
	int		dladdroff;
	int		dladdrlen;
	int		msize;
	int		error = 0;
	boolean_t	is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT((MTYPE(mp) == M_PCPROTO) || (MTYPE(mp) == M_PROTO));
	dludp = (dl_unitdata_req_t *)mp->b_rptr;
	dladdroff = dludp->dl_dest_addr_offset;
	dladdrlen = dludp->dl_dest_addr_length;
	ASSERT(sps != NULL);
	ASSERT(!IS_SPS_PIOATTACH(sps));
	ASSERT(sps->sps_dlstate == DL_IDLE);
	ASSERT(q->q_ptr == sps);
	/*
	 * If this stream is not attached to any ppas, then discard data
	 * coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: no attached ppa\n"));
		error = ENOLINK;
	} else if (mp->b_cont == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: missing data\n"));
		error = EPROTO;
	}
	if (error != 0) {
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_BADDATA, error);
		return (0);
	}
	ASSERT(mp->b_cont->b_rptr != NULL);
	/*
	 * Check if outgoing packet size is larger than allowed. We use
	 * msgdsize to count all of M_DATA blocks in the message.
	 */
	msize = msgdsize(mp);
	if (msize > ppa->ppa_mtu) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: DL_UNITDATA_REQ (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/* Allocate a message (M_DATA) to contain PPP header bytes. */
	if ((hdrmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_allocbfail++;
		mutex_exit(&ppa->ppa_sta_lock);
		DBGERROR((CE_CONT,
		    "DLPI unitdata: can't allocate header buffer\n"));
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_SYSERR, ENOSR);
		return (0);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp->b_cont, sps->sps_sap,
		    B_FALSE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/* Discard DLPI header and keep only IP payload (mp->b_cont). */
	pktmp = mp->b_cont;
	mp->b_cont = NULL;
	freemsg(mp);
	mp = hdrmp;

	*(uchar_t *)mp->b_wptr++ = PPP_ALLSTATIONS;
	*(uchar_t *)mp->b_wptr++ = PPP_UI;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap >> 8) & 0xff;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap) & 0xff;
	ASSERT(MBLKL(mp) == PPP_HDRLEN);

	linkb(mp, pktmp);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.
	 */
	if (IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * Just put this back on the queue and allow the write service
	 * routine to handle it.  We're nested too deeply here to
	 * rewind the stack sufficiently to prevent overflow.  This is
	 * the slow path anyway.
	 */
	if (putq(q, mp) == 0) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_oqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
	} else {
		qenable(q);
	}
	return (0);
}
Example #16
/*
 * cvc_wsrv()
 *	cvc_wsrv handles mblks that have been queued by cvc_wput either because
 *	the IOSRAM path was selected or the queue contained preceding mblks.  To
 *	optimize processing (particularly if the IOSRAM path is selected), all
 *	mblks are pulled off of the queue and chained together.  Then, if there
 *	are any mblks on the chain, they are either forwarded to cvcredir or
 *	sent for IOSRAM processing as appropriate given current circumstances.
 *	IOSRAM processing may not be able to handle all of the data in the
 *	chain, in which case the remaining data is placed back on the queue and
 *	a timeout routine is registered to reschedule cvc_wsrv in the future.
 *	Automatic scheduling of the queue is disabled (noenable(q)) while
 *	cvc_wsrv is running to avoid superfluous calls.
 */
static int
cvc_wsrv(queue_t *q)
{
	mblk_t *total_mp = NULL;
	mblk_t *mp;

	if (cvc_stopped == 1 || cvc_suspended == 1) {
		return (0);
	}

	rw_enter(&cvclock, RW_READER);
	noenable(q);

	/*
	 * If there's already a timeout registered for scheduling this routine
	 * in the future, it's a safe bet that we don't want to run right now.
	 */
	if (cvc_timeout_id != (timeout_id_t)-1) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Start by linking all of the queued M_DATA mblks into a single chain
	 * so we can flush as much as possible to IOSRAM (if we choose that
	 * route).
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Technically, certain IOCTLs are supposed to be processed only
		 * after all preceding data has completely "drained".  In an
		 * attempt to support that, we delay processing of those IOCTLs
		 * until this point.  It is still possible that an IOCTL will be
		 * processed before all preceding data is drained, for instance
		 * in the case where not all of the preceding data would fit
		 * into IOSRAM and we have to place it back on the queue.
		 * However, since none of these IOCTLs really appear to have any
		 * relevance for cvc, and we weren't supporting delayed
		 * processing at _all_ previously, this partial implementation
		 * should suffice.  (Fully implementing the delayed IOCTL
		 * processing would be unjustifiably difficult given the nature
		 * of the underlying IOSRAM console protocol.)
		 */
		if (mp->b_datap->db_type == M_IOCTL) {
			cvc_ioctl(q, mp);
			continue;
		}

		/*
		 * We know that only M_IOCTL and M_DATA blocks are placed on our
		 * queue.  Since this block isn't an M_IOCTL, it must be M_DATA.
		 */
		if (total_mp != NULL) {
			linkb(total_mp, mp);
		} else {
			total_mp = mp;
		}
	}

	/*
	 * Do we actually have anything to do?
	 */
	if (total_mp == NULL) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Yes, we do, so send the data to either cvcredir or IOSRAM as
	 * appropriate.  In the latter case, we might not be able to transmit
	 * everything right now, so re-queue the remainder.
	 */
	if (cvcoutput_q != NULL && !via_iosram) {
		CVC_DBG0(CVC_DBG_NETWORK_WR, "Sending to cvcredir.");
		/*
		 * XXX - should canputnext be called here?  Starfire's cvc
		 * doesn't do that, and it appears to work anyway.
		 */
		(void) putnext(cvcoutput_q, total_mp);
	} else {
		CVC_DBG0(CVC_DBG_IOSRAM_WR, "Send to IOSRAM.");
		cvc_send_to_iosram(&total_mp);
		if (total_mp != NULL) {
			(void) putbq(q, total_mp);
		}
	}

	/*
	 * If there is still data queued at this point, make sure the queue
	 * gets scheduled again after an appropriate delay (which has been
	 * somewhat arbitrarily selected as half of the SC's input polling
	 * frequency).
	 */
	enableok(q);
	if (q->q_first != NULL) {
		if (cvc_timeout_id == (timeout_id_t)-1) {
			cvc_timeout_id = timeout(cvc_flush_queue,
			    NULL, drv_usectohz(CVC_IOSRAM_POLL_USECS / 2));
		}
	}
	rw_exit(&cvclock);
	return (0);
}
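
As in Example #7, each linkb(total_mp, mp) walks to the end of the chain built so far, so draining a deep queue does quadratic work in the link step. A hedged variant that drains in constant time per message by tracking the tail (hypothetical helper; the M_IOCTL handling is omitted):

#include <sys/stream.h>

/*
 * Sketch only: pull every message off the queue and join them into
 * one b_cont chain, keeping a tail pointer so each append is O(1)
 * plus the length of the message being appended.
 */
static mblk_t *
drain_queue(queue_t *q)
{
	mblk_t *head = NULL, *tail = NULL, *mp;

	while ((mp = getq(q)) != NULL) {
		if (head == NULL)
			head = tail = mp;
		else
			tail->b_cont = mp;
		while (tail->b_cont != NULL)	/* mp may itself be a chain */
			tail = tail->b_cont;
	}
	return (head);
}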