/*
 * Split a Theora bitstream buffer into MTU-sized payloads and push them,
 * each prefixed with a 6-byte payload header, onto the filter's output.
 *
 * f         - the filter (its first output queue receives the packets)
 * s         - encoder state; s->mtu is the maximum payload size
 * om        - the bitstream buffer to packetize (consumed by this call)
 * timestamp - RTP timestamp attached to every resulting packet
 * tdt       - Theora data type (raw data or packed configuration)
 */
static void packetize_and_send(MSFilter *f, EncState *s, mblk_t *om, uint32_t timestamp, uint8_t tdt){
	mblk_t *packet;
	mblk_t *h;
	int npackets=0;
	static const int ident=0xdede;
	while(om!=NULL){
		if (om->b_wptr-om->b_rptr>=s->mtu){
			/* More than one MTU of data left: carve off an MTU-sized
			 * slice that shares the original data block, and advance
			 * the source read pointer past it. */
			packet=dupb(om);
			packet->b_wptr=packet->b_rptr+s->mtu;
			om->b_rptr=packet->b_wptr;
		}else {
			/* Remainder fits in one packet; this ends the loop. */
			packet=om;
			om=NULL;
		}
		++npackets;
		h=allocb(6,0);
		/* Fragmentation flags: whether this is the first packet and
		 * whether more data remains decide the fragment type. */
		if (npackets==1){
			if (om==NULL)
				payload_header_set(h->b_wptr,ident,NOT_FRAGMENTED,tdt,1);
			else
				payload_header_set(h->b_wptr,ident,START_FRAGMENT,tdt,1);
		}else{
			if (om==NULL)
				payload_header_set(h->b_wptr,ident,END_FRAGMENT,tdt,1);
			else
				payload_header_set(h->b_wptr,ident,CONT_FRAGMENT,tdt,1);
		}
		h->b_wptr+=4;
		/* Payload length, network byte order, right after the 4-byte header. */
		*((uint16_t*)h->b_wptr)=htons(msgdsize(packet));
		h->b_wptr+=2;
		h->b_cont=packet;
		mblk_set_timestamp_info(h,timestamp);
		ms_debug("sending theora frame of size %i",msgdsize(h));
		ms_queue_put(f->outputs[0],h);
	}
}
/*
 * Schedule a uioamove() on a mblk. This is done as mblks are enqueued
 * by the protocol on the socket's rcv queue.
 *
 * Caller must be holding so_lock.
 */
void
sod_uioa_mblk_init(struct sodirect_s *sodp, mblk_t *mp, size_t msg_size)
{
	uioa_t *uioap = &sodp->sod_uioa;
	mblk_t *mp1 = mp;	/* walks the chain */
	mblk_t *lmp = NULL;	/* last block successfully scheduled */

	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT(msg_size == msgdsize(mp));

	if (uioap->uioa_state & UIOA_ENABLED) {
		/* Uioa is enabled */

		if (msg_size > uioap->uio_resid) {
			/*
			 * There isn't enough uio space for the mblk_t chain
			 * so disable uioa such that this and any additional
			 * mblk_t data is handled by the socket and schedule
			 * the socket for wakeup to finish this uioa.
			 */
			uioap->uioa_state &= UIOA_CLR;
			uioap->uioa_state |= UIOA_FINI;
			return;
		}
		/* Schedule each block of the chain for async move. */
		do {
			uint32_t len = MBLKL(mp1);

			if (!uioamove(mp1->b_rptr, len, UIO_READ, uioap)) {
				/* Scheduled, mark dblk_t as such */
				DB_FLAGS(mp1) |= DBLK_UIOA;
			} else {
				/* Error, turn off async processing */
				uioap->uioa_state &= UIOA_CLR;
				uioap->uioa_state |= UIOA_FINI;
				break;
			}
			lmp = mp1;
		} while ((mp1 = mp1->b_cont) != NULL);

		if (mp1 != NULL || uioap->uio_resid == 0) {
			/* Break the mblk chain if necessary: the unscheduled
			 * remainder (mp1 onward) is detached from the scheduled
			 * prefix and hung off mp->b_next. */
			if (mp1 != NULL && lmp != NULL) {
				mp->b_next = mp1;
				lmp->b_cont = NULL;
			}
		}
	}
}
/* Drain the input queue and feed each audio buffer to the pulseaudio
 * playback stream. Buffers are dropped (silently) when the stream is
 * absent or cannot absorb the whole buffer. */
static void pulse_write_process(MSFilter *f){
	PulseWriteState *s=(PulseWriteState*)f->data;
	mblk_t *m;
	for (m=ms_queue_get(f->inputs[0]); m!=NULL; m=ms_queue_get(f->inputs[0])){
		int len=msgdsize(m);
		if (s->stream){
			pa_threaded_mainloop_lock(pa_loop);
			/* Only write when the whole buffer fits in the stream's
			 * writable space; otherwise the data is dropped. */
			if (pa_stream_writable_size(s->stream)>=len){
				//ms_message("Pushing data to pulseaudio");
				pa_stream_write(s->stream,m->b_rptr,len,NULL,0,PA_SEEK_RELATIVE);
			}
			pa_threaded_mainloop_unlock(pa_loop);
		}
		freemsg(m);
	}
}
/*APP accessors */ bool_t rtcp_is_APP(const mblk_t *m){ const rtcp_common_header_t *ch=rtcp_get_common_header(m); if (ch!=NULL && rtcp_common_header_get_packet_type(ch)==RTCP_APP){ if (msgdsize(m)<sizeof(rtcp_common_header_t)+ rtcp_common_header_get_length(ch)){ ortp_warning("Too short RTCP APP packet."); return FALSE; } if (sizeof(rtcp_common_header_t)+rtcp_common_header_get_length(ch) < sizeof(rtcp_app_t)){ ortp_warning("Bad RTCP APP packet."); return FALSE; } return TRUE; } return FALSE; }
/*
 * Build an RTCP payload-specific feedback RPSI (Reference Picture
 * Selection Indication) packet carrying the given native bit string.
 *
 * session        - session providing sender/receiver SSRCs and payload type
 * bit_string     - the RPSI native bit string
 * bit_string_len - its length in BITS
 *
 * Returns a freshly allocated mblk; ownership passes to the caller.
 */
static mblk_t *
make_rtcp_fb_rpsi(RtpSession *session, uint8_t *bit_string, uint16_t bit_string_len) {
	uint16_t bit_string_len_in_bytes;
	int additional_bytes;
	int size;
	mblk_t *h;
	rtcp_common_header_t *ch;
	rtcp_fb_header_t *fbh;
	rtcp_fb_rpsi_fci_t *fci;
	int i;

	/* Calculate packet size and allocate memory. The FCI struct already
	 * reserves 2 bytes of bit string, hence "additional" beyond that. */
	bit_string_len_in_bytes = (bit_string_len / 8) + (((bit_string_len % 8) == 0) ? 0 : 1);
	additional_bytes = bit_string_len_in_bytes - 2;
	if (additional_bytes < 0) additional_bytes = 0;
	size = sizeof(rtcp_common_header_t) + sizeof(rtcp_fb_header_t) + sizeof(rtcp_fb_rpsi_fci_t) + additional_bytes;
	h = allocb(size, 0);

	/* Fill RPSI */
	ch = (rtcp_common_header_t *)h->b_wptr;
	h->b_wptr += sizeof(rtcp_common_header_t);
	fbh = (rtcp_fb_header_t *)h->b_wptr;
	h->b_wptr += sizeof(rtcp_fb_header_t);
	fci = (rtcp_fb_rpsi_fci_t *)h->b_wptr;
	h->b_wptr += sizeof(rtcp_fb_rpsi_fci_t);
	fbh->packet_sender_ssrc = htonl(rtp_session_get_send_ssrc(session));
	fbh->media_source_ssrc = htonl(rtp_session_get_recv_ssrc(session));
	if (bit_string_len <= 16) {
		/* Short string: pad within the FCI's built-in 2 bytes. */
		fci->pb = 16 - bit_string_len;
		memset(&fci->bit_string, 0, 2);
	} else {
		/* NOTE(review): pb should be the number of padding bits; the
		 * expression below yields (len - 16) mod 32 rather than the
		 * bits needed to reach a 32-bit boundary — confirm against
		 * RFC 4585 section 6.3.3.2 before relying on interop. */
		fci->pb = (bit_string_len - 16) % 32;
		memset(&fci->bit_string, 0, bit_string_len_in_bytes);
	}
	fci->payload_type = rtp_session_get_recv_payload_type(session) & 0x7F;
	/* Copy whole bytes, then OR in the trailing partial byte bit by bit. */
	memcpy(&fci->bit_string, bit_string, bit_string_len / 8);
	for (i = 0; i < (bit_string_len % 8); i++) {
		fci->bit_string[bit_string_len_in_bytes - 1] |= (bit_string[bit_string_len_in_bytes - 1] & (1 << (7 - i)));
	}

	/* Fill common header */
	rtcp_common_header_init(ch, session, RTCP_PSFB, RTCP_PSFB_RPSI, msgdsize(h));
	return h;
}
/*
 * Theora encoder process callback: for each incoming YUV frame, either
 * (re)send the packed configuration headers (periodically, at stream
 * start) or encode the frame and packetize the resulting bitstream.
 */
static void enc_process(MSFilter *f){
	mblk_t *im,*om;
	ogg_packet op;
	EncState *s=(EncState*)f->data;
	uint64_t timems=f->ticker->time;
	uint32_t timestamp=timems*90;	/* milliseconds -> 90 kHz RTP clock */
	uint64_t elapsed;

	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/* for the first frames only send theora packed conf */
		om=NULL;
		if (s->nframes==0){
			s->start_time=timems;
		}
		elapsed=timems-s->start_time;

		if (need_send_conf(s,elapsed)){
			if (s->packed_conf) {
				om=dupmsg(s->packed_conf);
				ms_message("sending theora packed conf (%i bytes)",msgdsize(om));
				packetize_and_send(f,s,om,timestamp,THEORA_PACKED_CONF);
			}else {
				ms_error("No packed conf to send.");
			}
		}else{
			enc_fill_yuv(&s->yuv,im);
			ms_debug("subtmitting yuv frame to theora encoder...");
			if (theora_encode_YUVin(&s->tstate,&s->yuv)!=0){
				ms_error("theora_encode_YUVin error.");
			}else{
				if (theora_encode_packetout(&s->tstate,0,&op)==1){
					ms_debug("Got theora coded frame");
					/* Copy the encoder's packet into an mblk and ship it. */
					om=allocb(op.bytes,0);
					memcpy(om->b_wptr,op.packet,op.bytes);
					om->b_wptr+=op.bytes;
					packetize_and_send(f,s,om,timestamp,THEORA_RAW_DATA);
				}
			}
		}
		freemsg(im);
		s->nframes++;
	}
}
/*ARGSUSED*/
/*
 * Verify the IPv4 header checksum of the packet in mp.
 * Returns 1 when the checksum is valid (or already verified by
 * hardware), 0 on a short packet, failed pullup or bad checksum.
 */
static int
ip_isvalidchecksum(net_handle_t neti, mblk_t *mp)
{
	unsigned char *wptr;
	ipha_t *ipha = (ipha_t *)mp->b_rptr;
	int hlen;
	int ret;

	ASSERT(mp != NULL);

	/* Trust hardware when it fully verified header and payload. */
	if (dohwcksum &&
	    DB_CKSUM16(mp) != 0xFFFF &&
	    (DB_CKSUMFLAGS(mp) & HCK_FULLCKSUM) &&
	    (DB_CKSUMFLAGS(mp) & HCK_FULLCKSUM_OK) &&
	    (DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM))
		return (1);

	hlen = (ipha->ipha_version_and_hdr_length & 0x0F) << 2;

	/*
	 * Check that the mblk being passed in has enough data in it
	 * before blindly checking ip_cksum.
	 */
	if (msgdsize(mp) < hlen)
		return (0);

	if (mp->b_wptr < mp->b_rptr + hlen) {
		if (pullupmsg(mp, hlen) == 0)
			return (0);
		/*
		 * BUGFIX: pullupmsg() may allocate a new data block and
		 * change b_rptr, leaving the previously computed header
		 * pointer stale; refresh it before reading the checksum.
		 */
		ipha = (ipha_t *)mp->b_rptr;
		wptr = mp->b_wptr;
	} else {
		/* Temporarily clip the block to the header for ip_cksum(). */
		wptr = mp->b_wptr;
		mp->b_wptr = mp->b_rptr + hlen;
	}

	if (ipha->ipha_hdr_checksum == ip_cksum(mp, 0, ipha->ipha_hdr_checksum))
		ret = 1;
	else
		ret = 0;
	mp->b_wptr = wptr;

	return (ret);
}
/* Hand one audio buffer to the wave output device: prepare the WAVEHDR
 * around the mblk's data and submit it for playback. Failures are
 * counted in stat_notplayed. */
static void playout_buf(WinSnd *d, WAVEHDR *hdr, mblk_t *m){
	MMRESULT res;

	hdr->dwUser=(DWORD)m;	/* keep the mblk so it can be freed on completion */
	hdr->lpData=(LPSTR)m->b_rptr;
	hdr->dwBufferLength=msgdsize(m);
	hdr->dwFlags=0;

	res=waveOutPrepareHeader(d->outdev,hdr,sizeof(*hdr));
	if (res!=MMSYSERR_NOERROR){
		ms_error("waveOutPrepareHeader() error");
		d->stat_notplayed++;
	}
	res=waveOutWrite(d->outdev,hdr,sizeof(*hdr));
	if (res==MMSYSERR_NOERROR){
		d->nbufs_playing++;
	}else{
		ms_error("waveOutWrite() error");
		d->stat_notplayed++;
	}
}
/* Build an RTCP Picture Loss Indication feedback packet. A PLI carries
 * no FCI: it is just the common header plus the two feedback SSRCs.
 * Returns a freshly allocated mblk owned by the caller. */
static mblk_t *
make_rtcp_fb_pli(RtpSession *session) {
	const int pkt_size = sizeof(rtcp_common_header_t) + sizeof(rtcp_fb_header_t);
	mblk_t *msg = allocb(pkt_size, 0);
	rtcp_common_header_t *hdr;
	rtcp_fb_header_t *fb;

	/* Lay out the common header followed by the feedback header. */
	hdr = (rtcp_common_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_common_header_t);
	fb = (rtcp_fb_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_fb_header_t);

	fb->packet_sender_ssrc = htonl(rtp_session_get_send_ssrc(session));
	fb->media_source_ssrc = htonl(rtp_session_get_recv_ssrc(session));

	/* Finalize the common header now that the size is known. */
	rtcp_common_header_init(hdr, session, RTCP_PSFB, RTCP_PSFB_PLI, msgdsize(msg));
	return msg;
}
/* Initialize the RTCP send algorithm for this session: estimate the
 * average compound-RTCP packet size (IP/UDP overhead + report + SDES +
 * enabled XR blocks) and schedule the first transmission. Does nothing
 * when RTCP is disabled, no upload bandwidth is known, or the algorithm
 * was already initialized. */
static void rtp_session_schedule_first_rtcp_send(RtpSession *session) {
	OrtpRtcpSendAlgorithm *algo = &session->rtcp.send_algo;
	size_t ip_overhead;
	size_t report_bytes;
	size_t sdes_bytes;
	size_t xr_bytes = 0;
	uint64_t now;

	if ((session->rtcp.enabled == FALSE) || (session->target_upload_bandwidth == 0)
		|| (algo->initialized == TRUE))
		return;

	ip_overhead = (ortp_stream_is_ipv6(&session->rtcp.gs) == TRUE)
		? IP6_UDP_OVERHEAD : IP_UDP_OVERHEAD;
	sdes_bytes = (session->full_sdes != NULL)
		? msgdsize(session->full_sdes) + sizeof(rtcp_common_header_t) : 0;

	/* Report size depends on the session direction. */
	switch (session->mode) {
		case RTP_SESSION_RECVONLY:
			report_bytes = sizeof(rtcp_rr_t);
			break;
		case RTP_SESSION_SENDONLY:
			report_bytes = sizeof(rtcp_sr_t) - sizeof(report_block_t);
			break;
		case RTP_SESSION_SENDRECV:
		default:
			report_bytes = sizeof(rtcp_sr_t);
			break;
	}

	/* Account for each enabled extended-report block. */
	if (session->rtcp.xr_conf.enabled == TRUE) {
		if (session->rtcp.xr_conf.rcvr_rtt_mode != OrtpRtcpXrRcvrRttNone)
			xr_bytes += sizeof(rtcp_xr_header_t) + sizeof(rtcp_xr_rcvr_rtt_report_block_t);
		if (session->rtcp.xr_conf.stat_summary_enabled == TRUE)
			xr_bytes += sizeof(rtcp_xr_header_t) + sizeof(rtcp_xr_stat_summary_report_block_t);
		if (session->rtcp.xr_conf.voip_metrics_enabled == TRUE)
			xr_bytes += sizeof(rtcp_xr_header_t) + sizeof(rtcp_xr_voip_metrics_report_block_t);
	}

	algo->avg_rtcp_size = (float)(ip_overhead + report_bytes + sdes_bytes + xr_bytes);
	algo->initialized = TRUE;

	now = ortp_get_cur_time_ms();
	compute_rtcp_interval(session);
	if (algo->T_rr > 0) algo->tn = now + algo->T_rr;
	algo->tp = now;
	algo->t_rr_last = now;
	algo->Tmin = 0;
}
/**
 * Reads telephony events from a rtp packet. @tab points to the beginning of the event buffer.
 *
 * @param session a rtp session from which telephony events are received.
 * @param packet a rtp packet as a mblk_t.
 * @param tab the address of a pointer, set to the start of the in-place
 *            event array within the packet (not a copy).
 * @return the number of events in the packet if successfull, 0 if the packet
 *         did not contain telephony events, -1 if the packet has no payload block.
**/
int rtp_session_read_telephone_event(RtpSession *session,
		mblk_t *packet,telephone_event_t **tab)
{
	int datasize;
	int num;
	int i;
	telephone_event_t *tev;
	rtp_header_t *hdr=(rtp_header_t*)packet->b_rptr;

	/* The event payload is expected in the continuation block. */
	return_val_if_fail(packet->b_cont!=NULL,-1);
	if (hdr->paytype!=session->rcv.telephone_events_pt) return 0;  /* this is not tel ev.*/
	/* NOTE(review): msgdsize() counts the whole mblk chain, so this size
	 * appears to include the RTP header block as well as the event
	 * payload, which would inflate num below — confirm how the received
	 * packet is split before relying on this count. */
	datasize=msgdsize(packet);
	tev=*tab=(telephone_event_t*)packet->b_cont->b_rptr;
	/* convert from network to host order what should be */
	num=datasize/sizeof(telephone_event_t);
	for (i=0; i<num; i++)
	{
		tev[i].duration=ntohs(tev[i].duration);
	}
	return num;
}
/*
 * Frees mp on failure
 */
/* Wrap the given error chain in an SCTP error-cause indication parameter
 * whose header carries the correlation id, and return the new head. */
static mblk_t *
sctp_asconf_prepend_errwrap(mblk_t *mp, uint32_t cid)
{
	mblk_t *wrapper;
	sctp_parm_hdr_t *parm;

	/* Allocate room for the wrapper header plus the correlation id. */
	wrapper = allocb(sizeof (*parm) + sizeof (cid), BPRI_MED);
	if (wrapper == NULL) {
		freemsg(mp);
		return (NULL);
	}

	parm = (sctp_parm_hdr_t *)wrapper->b_rptr;
	wrapper->b_wptr += sizeof (*parm) + sizeof (cid);
	parm->sph_type = htons(PARM_ERROR_IND);
	/* Length covers the wrapped chain, this header and the cid. */
	parm->sph_len = htons(msgdsize(mp) + sizeof (*parm) + sizeof (cid));
	bcopy(&cid, parm + 1, sizeof (uint32_t));
	wrapper->b_cont = mp;
	return (wrapper);
}
/* Handle one incoming T.140 real-time-text packet: detect loss or
 * reordering via the sequence number, then feed the payload to the
 * text reassembly buffer. */
static void process_t140_packet(RealTimeTextSinkData *stream, mblk_t *packet) {
	int seqno = mblk_get_cseq(packet);
	uint8_t *data = packet->b_rptr;
	size_t datalen = msgdsize(packet);

	ms_debug("t140 seqno:%i", seqno);
	if (stream->flags & TS_FLAG_NOTFIRST) {
		int gap = red_needed(seqno, stream->prevseqno);
		if (gap < 0) {
			ms_warning("packet arrived out of order");
			return;
		}
		if (gap > 0) {
			/* Packets were lost: insert the substitution character. */
			stream->inbufsize = 3;
			insert_lost_char(stream->inbuf);
		}
	}
	if (read_t140_data(stream, data, (int)datalen)) {
		return; /* return without updating seqno */
	}
	stream->prevseqno = seqno;
}
/*
 * parse_packet(packet, mp)
 *
 * parses the given message block into a ipgpc_packet_t structure
 */
void
parse_packet(ipgpc_packet_t *packet, mblk_t *mp)
{
	ipha_t *ipha = (ipha_t *)mp->b_rptr;	/* IP header at start of data */

	/* Addresses, DS field and protocol come straight from the header. */
	V4_PART_OF_V6(packet->saddr) = (int32_t)ipha->ipha_src;
	V4_PART_OF_V6(packet->daddr) = (int32_t)ipha->ipha_dst;
	packet->dsfield = ipha->ipha_type_of_service;
	packet->proto = ipha->ipha_protocol;
	packet->sport = 0;
	packet->dport = 0;
	find_ids(packet, mp);
	packet->len = msgdsize(mp);

	/* parse out TCP/UDP ports, if appropriate */
	switch (packet->proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_SCTP:
		get_port_info(packet, ipha, AF_INET, mp);
		break;
	default:
		break;
	}
}
/* Build an RTCP TMMBR (Temporary Maximum Media Stream Bit Rate Request)
 * feedback packet for the given maximum total bitrate and measured
 * per-packet overhead, and stash a copy for possible retransmission.
 * Returns a freshly allocated mblk owned by the caller. */
static mblk_t *
make_rtcp_fb_tmmbr(RtpSession *session, uint64_t mxtbr, uint16_t measured_overhead) {
	const int pkt_size = sizeof(rtcp_common_header_t) + sizeof(rtcp_fb_header_t)
		+ sizeof(rtcp_fb_tmmbr_fci_t);
	mblk_t *msg = allocb(pkt_size, 0);
	rtcp_common_header_t *hdr;
	rtcp_fb_header_t *fb;
	rtcp_fb_tmmbr_fci_t *tmmbr_fci;
	uint8_t exp = 0;
	uint32_t mantissa;

	/* Reduce mxtbr to a 17-bit mantissa and an exponent: the FCI
	 * encodes the bitrate as mantissa * 2^exp. */
	for (; mxtbr >= (1 << 17); exp++)
		mxtbr >>= 1;
	mantissa = mxtbr & 0x0001FFFF;

	/* Lay out common header, feedback header and FCI. */
	hdr = (rtcp_common_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_common_header_t);
	fb = (rtcp_fb_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_fb_header_t);
	tmmbr_fci = (rtcp_fb_tmmbr_fci_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_fb_tmmbr_fci_t);

	fb->packet_sender_ssrc = htonl(rtp_session_get_send_ssrc(session));
	fb->media_source_ssrc = htonl(0);
	tmmbr_fci->ssrc = htonl(rtp_session_get_recv_ssrc(session));
	rtcp_fb_tmmbr_fci_set_mxtbr_exp(tmmbr_fci, exp);
	rtcp_fb_tmmbr_fci_set_mxtbr_mantissa(tmmbr_fci, mantissa);
	rtcp_fb_tmmbr_fci_set_measured_overhead(tmmbr_fci, measured_overhead);

	rtcp_common_header_init(hdr, session, RTCP_RTPFB, RTCP_RTPFB_TMMBR, msgdsize(msg));

	/* Store packet to be able to retransmit. */
	if (session->rtcp.tmmbr_info.sent)
		freemsg(session->rtcp.tmmbr_info.sent);
	session->rtcp.tmmbr_info.sent = copymsg(msg);
	return msg;
}
/*
 * set the q_ptr of the 'q' to the conn_t pointer passed in
 */
static void
ip_helper_share_conn(queue_t *q, mblk_t *mp, cred_t *crp)
{
	conn_t *connp;

	/*
	 * This operation is allowed only on helper streams with kcred.
	 * BUGFIX: validate the credentials and the message size BEFORE
	 * reading the conn_t pointer out of b_cont, so a short or
	 * malformed message is never dereferenced.
	 */
	if (kcred != crp || msgdsize(mp->b_cont) != sizeof (void *)) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	connp = *((conn_t **)mp->b_cont->b_rptr);

	/* Wire the helper stream and the conn to each other. */
	connp->conn_helper_info->iphs_minfo = q->q_ptr;
	connp->conn_helper_info->iphs_rq = RD(q);
	connp->conn_helper_info->iphs_wq = WR(q);
	WR(q)->q_ptr = RD(q)->q_ptr = (void *)connp;
	connp->conn_rq = RD(q);
	connp->conn_wq = WR(q);
	miocack(q, mp, 0, 0);
}
/*
 * Build an RTCP SDES packet: the common header followed by the session's
 * own SDES chunk and one chunk per contributing source.
 * Returns a freshly allocated mblk chain owned by the caller.
 */
mblk_t* rtp_session_create_rtcp_sdes_packet(RtpSession *session) {
	mblk_t *mp=allocb(sizeof(rtcp_common_header_t),0);
	rtcp_common_header_t *rtcp;
	mblk_t *tmp,*m=mp;
	queue_t *q;
	int rc=0;	/* number of SDES chunks, written into the header */

	rtcp = (rtcp_common_header_t*)mp->b_wptr;
	mp->b_wptr+=sizeof(rtcp_common_header_t);

	/* concatenate all sdes chunks */
	sdes_chunk_set_ssrc(session->sd,session->snd.ssrc);
	m=concatb(m,dupmsg(session->sd));
	rc++;

	q=&session->contributing_sources;
	/* BUGFIX: advance the iterator with qnext(q,tmp), not qnext(q,mp).
	 * Stepping from 'mp' never moved through the queue, so the loop
	 * either spun forever or kept appending the same chunk. */
	for (tmp=qbegin(q); !qend(q,tmp); tmp=qnext(q,tmp)){
		m=concatb(m,dupmsg(tmp));
		rc++;
	}
	rtcp_common_header_init(rtcp,session,RTCP_SDES,rc,msgdsize(mp));
	return mp;
}
/* Build an RTCP generic NACK feedback packet. The FCI carries the id of
 * the first lost packet (pid) and a bitmask of the following 16 losses
 * (blp). Returns a freshly allocated mblk owned by the caller. */
static mblk_t *
make_rtcp_fb_generic_nack(RtpSession *session, uint16_t pid, uint16_t blp) {
	const int pkt_size = sizeof(rtcp_common_header_t) + sizeof(rtcp_fb_header_t)
		+ sizeof(rtcp_fb_generic_nack_fci_t);
	mblk_t *msg = allocb(pkt_size, 0);
	rtcp_common_header_t *hdr;
	rtcp_fb_header_t *fb;
	rtcp_fb_generic_nack_fci_t *nack_fci;

	/* Lay out common header, feedback header and FCI. */
	hdr = (rtcp_common_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_common_header_t);
	fb = (rtcp_fb_header_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_fb_header_t);
	nack_fci = (rtcp_fb_generic_nack_fci_t *)msg->b_wptr;
	msg->b_wptr += sizeof(rtcp_fb_generic_nack_fci_t);

	fb->packet_sender_ssrc = htonl(rtp_session_get_send_ssrc(session));
	fb->media_source_ssrc = htonl(0);
	rtcp_fb_generic_nack_fci_set_pid(nack_fci, pid);
	rtcp_fb_generic_nack_fci_set_blp(nack_fci, blp);

	/* Fill common header */
	rtcp_common_header_init(hdr, session, RTCP_RTPFB, RTCP_RTPFB_NACK, msgdsize(msg));
	return msg;
}
/*
 * Process an incoming ASCONF chunk: validate its serial number, apply
 * the add/delete address parameters it carries, build and transmit the
 * matching ASCONF-ACK, and inform the clustering module (if loaded) of
 * the address changes.
 */
void
sctp_input_asconf(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
{
	const dispatch_t *dp;
	mblk_t *hmp;		/* the ASCONF-ACK being built */
	mblk_t *mp;
	uint32_t *idp;		/* serial number in the incoming chunk */
	uint32_t *hidp;		/* serial number echoed in the ACK */
	ssize_t rlen;		/* bytes of parameters remaining */
	sctp_parm_hdr_t *ph;
	sctp_chunk_hdr_t *ach;
	int cont;
	int act;		/* 1 = new serial: apply; 0 = duplicate: ACK only */
	uint16_t plen;
	uchar_t *alist = NULL;	/* added addresses, for clustering */
	size_t asize = 0;
	uchar_t *dlist = NULL;	/* deleted addresses, for clustering */
	size_t dsize = 0;
	uchar_t *aptr = NULL;
	uchar_t *dptr = NULL;
	int acount = 0;
	int dcount = 0;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	ASSERT(ch->sch_id == CHUNK_ASCONF);

	idp = (uint32_t *)(ch + 1);
	rlen = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*idp);

	if (rlen < 0 || rlen < sizeof (*idp)) {
		/* nothing there; bail out */
		return;
	}

	/* Check for duplicates */
	*idp = ntohl(*idp);
	if (*idp == (sctp->sctp_fcsn + 1)) {
		act = 1;
	} else if (*idp == sctp->sctp_fcsn) {
		act = 0;
	} else {
		/* stale or malicious packet; drop */
		return;
	}

	/* Create the ASCONF_ACK header */
	hmp = sctp_make_mp(sctp, fp, sizeof (*ach) + sizeof (*idp));
	if (hmp == NULL) {
		/* Let the peer retransmit */
		SCTP_KSTAT(sctps, sctp_send_asconf_ack_failed);
		return;
	}
	ach = (sctp_chunk_hdr_t *)hmp->b_wptr;
	ach->sch_id = CHUNK_ASCONF_ACK;
	ach->sch_flags = 0;
	/* Set the length later */
	hidp = (uint32_t *)(ach + 1);
	*hidp = htonl(*idp);
	hmp->b_wptr = (uchar_t *)(hidp + 1);

	/* Move to the Address Parameter */
	ph = (sctp_parm_hdr_t *)(idp + 1);
	if (rlen <= ntohs(ph->sph_len)) {
		freeb(hmp);
		return;
	}

	/*
	 * We already have the association here, so this address parameter
	 * doesn't seem to be very useful, should we make sure this is part
	 * of the association and send an error, if not?
	 * Ignore it for now.
	 */
	rlen -= ntohs(ph->sph_len);
	ph = (sctp_parm_hdr_t *)((char *)ph + ntohs(ph->sph_len));

	/*
	 * We need to pre-allocate buffer before processing the ASCONF
	 * chunk. We don't want to fail allocating buffers after processing
	 * the ASCONF chunk. So, we walk the list and get the number of
	 * addresses added and/or deleted.
	 */
	if (cl_sctp_assoc_change != NULL) {
		sctp_parm_hdr_t *oph = ph;
		ssize_t orlen = rlen;

		/*
		 * This not very efficient, but there is no better way of
		 * doing it. It should be fine since normally the param list
		 * will not be very long.
		 */
		while (orlen > 0) {
			/* Sanity checks */
			if (orlen < sizeof (*oph))
				break;
			plen = ntohs(oph->sph_len);
			if (plen < sizeof (*oph) || plen > orlen)
				break;
			if (oph->sph_type == htons(PARM_ADD_IP))
				acount++;
			if (oph->sph_type == htons(PARM_DEL_IP))
				dcount++;
			oph = sctp_next_parm(oph, &orlen);
			if (oph == NULL)
				break;
		}
		if (acount > 0 || dcount > 0) {
			if (acount > 0) {
				asize = sizeof (in6_addr_t) * acount;
				alist = kmem_alloc(asize, KM_NOSLEEP);
				if (alist == NULL) {
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			if (dcount > 0) {
				dsize = sizeof (in6_addr_t) * dcount;
				dlist = kmem_alloc(dsize, KM_NOSLEEP);
				if (dlist == NULL) {
					if (acount > 0)
						kmem_free(alist, asize);
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			aptr = alist;
			dptr = dlist;
			/*
			 * We will get the actual count when we process
			 * the chunk.
			 */
			acount = 0;
			dcount = 0;
		}
	}
	cont = 1;
	/* Walk the parameter list, dispatching each add/delete request. */
	while (rlen > 0 && cont) {
		in6_addr_t addr;

		/* Sanity checks */
		if (rlen < sizeof (*ph))
			break;
		plen = ntohs(ph->sph_len);
		if (plen < sizeof (*ph) || plen > rlen) {
			break;
		}
		idp = (uint32_t *)(ph + 1);
		dp = sctp_lookup_asconf_dispatch(ntohs(ph->sph_type));
		ASSERT(dp);
		if (dp->asconf) {
			mp = dp->asconf(sctp, ph, *idp, fp, &cont, act, &addr);
			if (cont == -1) {
				/*
				 * Not even enough memory to create
				 * an out-of-resources error. Free
				 * everything and return; the peer
				 * should retransmit.
				 */
				freemsg(hmp);
				if (alist != NULL)
					kmem_free(alist, asize);
				if (dlist != NULL)
					kmem_free(dlist, dsize);
				return;
			}
			if (mp != NULL) {
				/* Handler produced an error/result param: append. */
				linkb(hmp, mp);
			} else if (act != 0) {
				/* update the add/delete list */
				if (cl_sctp_assoc_change != NULL) {
					if (ph->sph_type == htons(PARM_ADD_IP)) {
						ASSERT(alist != NULL);
						bcopy(&addr, aptr, sizeof (addr));
						aptr += sizeof (addr);
						acount++;
					} else if (ph->sph_type == htons(PARM_DEL_IP)) {
						ASSERT(dlist != NULL);
						bcopy(&addr, dptr, sizeof (addr));
						dptr += sizeof (addr);
						dcount++;
					}
				}
			}
		}
		ph = sctp_next_parm(ph, &rlen);
		if (ph == NULL)
			break;
	}

	/*
	 * Update clustering's state for this assoc. Note acount/dcount
	 * could be zero (i.e. if the add/delete address(es) were not
	 * processed successfully). Regardless, if the ?size is > 0,
	 * it is the clustering module's responsibility to free the lists.
	 */
	if (cl_sctp_assoc_change != NULL) {
		(*cl_sctp_assoc_change)(sctp->sctp_connp->conn_family,
		    alist, asize, acount, dlist, dsize, dcount,
		    SCTP_CL_PADDR, (cl_sctp_handle_t)sctp);
		/* alist and dlist will be freed by the clustering module */
	}

	/* Now that the params have been processed, increment the fcsn */
	if (act) {
		sctp->sctp_fcsn++;
	}
	BUMP_LOCAL(sctp->sctp_obchunks);

	/* Patch the ACK chunk length now that it is fully assembled. */
	if (fp->isv4)
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr_len);
	else
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr6_len);

	sctp_set_iplen(sctp, hmp, fp->ixa);
	(void) conn_ip_output(hmp, fp->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	sctp_validate_peer(sctp);
}
/* ARGSUSED */
/*
 * Receive callback for a PF_PACKET socket: applies SAP and BPF
 * filtering, builds a T_unitdata_ind carrying a sockaddr_ll (plus
 * optional packet auxdata), delivers the packet chain upstream, and
 * engages flow control when the upper layer reports ENOSPC.
 */
static void
pfp_packet(void *arg, mac_resource_handle_t mrh, mblk_t *mp, boolean_t flag)
{
	struct T_unitdata_ind *tunit;
	struct sockaddr_ll *sll;
	struct sockaddr_ll *sol;
	mac_header_info_t hdr;
	struct pfpsock *ps;
	size_t tusz;
	mblk_t *mp0;
	int error;

	if (mp == NULL)
		return;

	ps = arg;
	if (ps->ps_flow_ctrld) {
		ps->ps_flow_ctrl_drops++;
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_flow_cntrld.value.ui64++;
		freemsg(mp);
		return;
	}

	if (mac_header_info(ps->ps_mh, mp, &hdr) != 0) {
		/*
		 * Can't decode the packet header information so drop it.
		 */
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_mac_hdr_fail.value.ui64++;
		freemsg(mp);
		return;
	}

	if (mac_type(ps->ps_mh) == DL_ETHER &&
	    hdr.mhi_bindsap == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evhp;
		struct ether_vlan_header evh;

		hdr.mhi_hdrsize = sizeof (struct ether_vlan_header);
		hdr.mhi_istagged = B_TRUE;

		if (MBLKL(mp) >= sizeof (*evhp)) {
			evhp = (struct ether_vlan_header *)mp->b_rptr;
		} else {
			int sz = sizeof (*evhp);
			char *s = (char *)&evh;
			mblk_t *tmp;
			int len;

			/*
			 * The VLAN header is split across mblks: gather it
			 * into the local copy. BUGFIX: advance the
			 * destination pointer after each segment copy;
			 * previously every segment overwrote the start of
			 * evh, corrupting the reassembled header.
			 */
			for (tmp = mp; sz > 0 && tmp != NULL;
			    tmp = tmp->b_cont) {
				len = min(sz, MBLKL(tmp));
				bcopy(tmp->b_rptr, s, len);
				s += len;
				sz -= len;
			}
			evhp = &evh;
		}
		hdr.mhi_tci = ntohs(evhp->ether_tci);
		hdr.mhi_bindsap = ntohs(evhp->ether_type);
	}

	if ((ps->ps_proto != 0) && (ps->ps_proto != hdr.mhi_bindsap)) {
		/*
		 * The packet is not of interest to this socket so
		 * drop it on the floor. Here the SAP is being used
		 * as a very course filter.
		 */
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_bad_proto.value.ui64++;
		freemsg(mp);
		return;
	}

	/*
	 * This field is not often set, even for ethernet,
	 * by mac_header_info, so compute it if it is 0.
	 */
	if (hdr.mhi_pktsize == 0)
		hdr.mhi_pktsize = msgdsize(mp);

	/*
	 * If a BPF filter is present, pass the raw packet into that.
	 * A failed match will result in zero being returned, indicating
	 * that this socket is not interested in the packet.
	 */
	if (ps->ps_bpf.bf_len != 0) {
		uchar_t *buffer;
		int buflen;

		buflen = MBLKL(mp);
		if (hdr.mhi_pktsize == buflen) {
			/* Contiguous packet: filter over the data directly. */
			buffer = mp->b_rptr;
		} else {
			/* Chained packet: buflen 0 tells bpf_filter to treat
			 * the argument as an mblk chain. */
			buflen = 0;
			buffer = (uchar_t *)mp;
		}
		rw_enter(&ps->ps_bpflock, RW_READER);
		if (bpf_filter(ps->ps_bpf.bf_insns, buffer,
		    hdr.mhi_pktsize, buflen) == 0) {
			rw_exit(&ps->ps_bpflock);
			ps->ps_stats.tp_drops++;
			ks_stats.kp_recv_filtered.value.ui64++;
			freemsg(mp);
			return;
		}
		rw_exit(&ps->ps_bpflock);
	}

	if (ps->ps_type == SOCK_DGRAM) {
		/*
		 * SOCK_DGRAM socket expect a "layer 3" packet, so advance
		 * past the link layer header.
		 */
		mp->b_rptr += hdr.mhi_hdrsize;
		hdr.mhi_pktsize -= hdr.mhi_hdrsize;
	}

	tusz = sizeof (struct T_unitdata_ind) + sizeof (struct sockaddr_ll);
	if (ps->ps_auxdata) {
		tusz += _TPI_ALIGN_TOPT(sizeof (struct tpacket_auxdata));
		tusz += _TPI_ALIGN_TOPT(sizeof (struct T_opthdr));
	}

	/*
	 * It is tempting to think that this could be optimised by having
	 * the base mblk_t allocated and hung off the pfpsock structure,
	 * except that then another one would need to be allocated for the
	 * sockaddr_ll that is included. Even creating a template to copy
	 * from is of questionable value, as read-write from one structure
	 * to the other is going to be slower than all of the initialisation.
	 */
	mp0 = allocb(tusz, BPRI_HI);
	if (mp0 == NULL) {
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_alloc_fail.value.ui64++;
		freemsg(mp);
		return;
	}
	(void) memset(mp0->b_rptr, 0, tusz);

	mp0->b_datap->db_type = M_PROTO;
	mp0->b_wptr = mp0->b_rptr + tusz;

	tunit = (struct T_unitdata_ind *)mp0->b_rptr;
	tunit->PRIM_type = T_UNITDATA_IND;
	/*
	 * NOTE(review): SRC_length is the generic sockaddr size although a
	 * sockaddr_ll is laid down — confirm that consumers only require
	 * the generic prefix before changing this.
	 */
	tunit->SRC_length = sizeof (struct sockaddr);
	tunit->SRC_offset = sizeof (*tunit);

	sol = (struct sockaddr_ll *)&ps->ps_sock;
	sll = (struct sockaddr_ll *)(mp0->b_rptr + sizeof (*tunit));
	sll->sll_ifindex = sol->sll_ifindex;
	sll->sll_hatype = (uint16_t)hdr.mhi_origsap;
	sll->sll_halen = sol->sll_halen;
	if (hdr.mhi_saddr != NULL)
		(void) memcpy(sll->sll_addr, hdr.mhi_saddr, sll->sll_halen);

	switch (hdr.mhi_dsttype) {
	case MAC_ADDRTYPE_MULTICAST :
		sll->sll_pkttype = PACKET_MULTICAST;
		break;
	case MAC_ADDRTYPE_BROADCAST :
		sll->sll_pkttype = PACKET_BROADCAST;
		break;
	case MAC_ADDRTYPE_UNICAST :
		if (memcmp(sol->sll_addr, hdr.mhi_daddr, sol->sll_halen) == 0)
			sll->sll_pkttype = PACKET_HOST;
		else
			sll->sll_pkttype = PACKET_OTHERHOST;
		break;
	}

	if (ps->ps_auxdata) {
		struct tpacket_auxdata *aux;
		struct T_opthdr *topt;

		tunit->OPT_offset = _TPI_ALIGN_TOPT(tunit->SRC_offset +
		    sizeof (struct sockaddr_ll));
		tunit->OPT_length = _TPI_ALIGN_TOPT(sizeof (struct T_opthdr)) +
		    _TPI_ALIGN_TOPT(sizeof (struct tpacket_auxdata));

		topt = (struct T_opthdr *)(mp0->b_rptr + tunit->OPT_offset);
		aux = (struct tpacket_auxdata *)
		    ((char *)topt + _TPI_ALIGN_TOPT(sizeof (*topt)));
		topt->len = tunit->OPT_length;
		topt->level = SOL_PACKET;
		topt->name = PACKET_AUXDATA;
		topt->status = 0;
		/*
		 * libpcap doesn't seem to use any other field,
		 * so it isn't clear how they should be filled in.
		 */
		aux->tp_vlan_vci = hdr.mhi_tci;
	}

	linkb(mp0, mp);

	ps->ps_upcalls->su_recv(ps->ps_upper, mp0, hdr.mhi_pktsize, 0,
	    &error, NULL);

	if (error == 0) {
		ps->ps_stats.tp_packets++;
		ks_stats.kp_recv_ok.value.ui64++;
	} else {
		mutex_enter(&ps->ps_lock);
		if (error == ENOSPC) {
			/* Probe once more; a second ENOSPC engages flow control. */
			ps->ps_upcalls->su_recv(ps->ps_upper, NULL, 0, 0,
			    &error, NULL);
			if (error == ENOSPC)
				ps->ps_flow_ctrld = B_TRUE;
		}
		mutex_exit(&ps->ps_lock);
		ps->ps_stats.tp_drops++;
		ks_stats.kp_recv_fail.value.ui64++;
	}
}
/* ARGSUSED */
/*
 * Deliver an SCTP event notification (emp) to the socket layer: wrap it
 * in a T_unitdata_ind whose source address is the primary peer address,
 * mark it as a notification, and pass it up via the ULP receive entry.
 */
static void
sctp_notify(sctp_t *sctp, mblk_t *emp, size_t len)
{
	struct T_unitdata_ind *tudi;
	mblk_t *mp;
	sctp_faddr_t *fp;
	int32_t rwnd = 0;
	int error;
	conn_t *connp = sctp->sctp_connp;

	if ((mp = allocb(sizeof (*tudi) + sizeof (void *) +
	    sizeof (struct sockaddr_in6), BPRI_HI)) == NULL) {
		/* XXX trouble: don't want to drop events. should queue it. */
		freemsg(emp);
		return;
	}
	dprint(3, ("sctp_notify: event %d\n", (*(uint16_t *)emp->b_rptr)));

	mp->b_datap->db_type = M_PROTO;
	mp->b_flag |= MSGMARK;
	mp->b_rptr += sizeof (void *);	/* pointer worth of padding */

	tudi = (struct T_unitdata_ind *)mp->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_length = 0;
	tudi->OPT_offset = 0;

	fp = sctp->sctp_primary;
	ASSERT(fp);

	/*
	 * Fill in primary remote address.
	 */
	if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
		struct sockaddr_in *sin4;

		tudi->SRC_length = sizeof (*sin4);
		sin4 = (struct sockaddr_in *)(tudi + 1);
		sin4->sin_family = AF_INET;
		sin4->sin_port = connp->conn_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		mp->b_wptr = (uchar_t *)(sin4 + 1);
	} else {
		struct sockaddr_in6 *sin6;

		tudi->SRC_length = sizeof (*sin6);
		sin6 = (struct sockaddr_in6 *)(tudi + 1);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = connp->conn_fport;
		sin6->sin6_addr = fp->faddr;
		mp->b_wptr = (uchar_t *)(sin6 + 1);
	}

	mp->b_cont = emp;

	/*
	 * Notifications are queued regardless of socket rx space. So
	 * we do not decrement sctp_rwnd here as this will confuse the
	 * other side.
	 */
#ifdef DEBUG
	/* Sanity check: the event chain length must match 'len'. */
	for (emp = mp->b_cont; emp; emp = emp->b_cont) {
		rwnd += emp->b_wptr - emp->b_rptr;
	}
	ASSERT(len == rwnd);
#endif

	/*
	 * Override b_flag for SCTP sockfs internal use
	 */
	mp->b_flag = (short)SCTP_NOTIFICATION;

	rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, mp, msgdsize(mp), 0,
	    &error, NULL);
	if (rwnd > sctp->sctp_rwnd) {
		sctp->sctp_rwnd = rwnd;
	}
}
/* Nothing to do on rtcp packets, just return packet length */
static int ms_zrtp_rtcp_process_on_receive(struct _RtpTransportModifier *t, mblk_t *msg)  {
	/* RTCP passes through untouched; report its size to the caller. */
	int len = msgdsize(msg);
	return len;
}
/*
 * iLBC decoder process callback. Auto-detects the frame mode (20 ms /
 * 38-byte frames vs 30 ms / 50-byte frames) from the packet size,
 * (re)initializing the decoder when the mode changes, decodes every
 * frame in each packet to 16-bit PCM, and performs packet loss
 * concealment when the concealer says a frame is overdue.
 */
static void dec_process(MSFilter *f){
	DecState *s=(DecState*)f->data;
	mblk_t *im,*om;
	int nbytes;
	float samples[BLOCKL_MAX]={0};
	int i;

	while ((im=ms_queue_get(f->inputs[0]))!=NULL){
		nbytes=msgdsize(im);
		/* Discard packets that are not a whole number of frames of
		 * either mode. */
		if (nbytes==0 || (nbytes%38!=0 && nbytes%50!=0)){
			freemsg(im);
			continue;
		}
		/* Note: the 20 ms branch wins for sizes divisible by both
		 * 38 and 50. */
		if (nbytes%38==0 && s->nbytes!=NO_OF_BYTES_20MS)
		{
			/* not yet configured, or misconfigured */
			s->ms_per_frame=20;
			s->nbytes=NO_OF_BYTES_20MS;
			s->nsamples=BLOCKL_20MS;
			s->ready=TRUE;
			initDecode(&s->ilbc_dec,s->ms_per_frame,s->postfilter);
		}
		else if (nbytes%50==0 && s->nbytes!=NO_OF_BYTES_30MS)
		{
			/* not yet configured, or misconfigured */
			s->ms_per_frame=30;
			s->nbytes=NO_OF_BYTES_30MS;
			s->nsamples=BLOCKL_30MS;
			s->ready=TRUE;
			initDecode(&s->ilbc_dec,s->ms_per_frame,s->postfilter);
		}
		if (s->nbytes>0 && nbytes>=s->nbytes){
			int frame_per_packet = nbytes/s->nbytes;
			int k;
			int plctime;

			/* Decode each frame in the packet to 16-bit PCM. */
			for (k=0;k<frame_per_packet;k++) {
				om=allocb(s->nsamples*2,0);
				iLBC_decode(samples,(uint8_t*)im->b_rptr+(k*s->nbytes),&s->ilbc_dec,1);
				for (i=0;i<s->nsamples;i++,om->b_wptr+=2){
					*((int16_t*)om->b_wptr)=samples[i];
				}
				mblk_meta_copy(im,om);
				ms_queue_put(f->outputs[0],om);
			}
			/* Tell the concealer how much real audio we produced. */
			if (s->plcctx){
				plctime=ms_concealer_inc_sample_time(s->plcctx,f->ticker->time,frame_per_packet*s->ms_per_frame,1);
				if (plctime>0){
					ms_warning("ilbc: did plc during %i ms",plctime);
				}
			}
		}else{
			ms_warning("bad iLBC frame !");
		}
		freemsg(im);
	}
	/* No (or not enough) packets arrived: synthesize one concealment
	 * frame if the concealer deems it necessary. */
	if (s->plcctx && s->ready && ms_concealer_context_is_concealement_required(s->plcctx,f->ticker->time)){
		om=allocb(s->nsamples*2,0);
		iLBC_decode(samples,(uint8_t*)NULL,&s->ilbc_dec,0 /*PLC*/);
		for (i=0;i<s->nsamples;i++,om->b_wptr+=2){
			*((int16_t*)om->b_wptr)=samples[i];
		}
		mblk_set_plc_flag(om,TRUE);
		ms_queue_put(f->outputs[0],om);
		ms_concealer_inc_sample_time(s->plcctx,f->ticker->time,s->ms_per_frame,0);
	}
}
/*
 * Take one JPEG snapshot of the latest video frame.
 *
 * When a snapshot has been requested (s->file and s->codec set), the last
 * queued YUV frame is scaled/converted with libswscale, encoded with the
 * ffmpeg JPEG codec, and written to s->file.  All input frames are flushed
 * at the end of every tick.
 */
static void jpg_process(MSFilter *f){
	JpegWriter *s=(JpegWriter*)f->data;
	ms_filter_lock(f);
	if (s->file!=NULL && s->codec!=NULL){
		MSPicture yuvbuf, yuvjpeg;
		mblk_t *m=ms_queue_peek_last(f->inputs[0]);
		if (ms_yuv_buf_init_from_mblk(&yuvbuf,m)==0){
			int error,got_pict;
			/* Compressed output cannot exceed the raw frame size. */
			int comp_buf_sz=msgdsize(m);
			uint8_t *comp_buf=(uint8_t*)ms_malloc0(comp_buf_sz);
			mblk_t *jpegm;
			struct SwsContext *sws_ctx;
			struct AVPacket packet;
			AVCodecContext *avctx=avcodec_alloc_context3(s->codec);

			memset(&packet, 0, sizeof(packet));

			avctx->width=yuvbuf.w;
			avctx->height=yuvbuf.h;
			avctx->time_base.num = 1;
			avctx->time_base.den =1;
			avctx->pix_fmt=AV_PIX_FMT_YUVJ420P;

			error=avcodec_open2(avctx,s->codec,NULL);
			if (error!=0) {
				ms_error("avcodec_open() failed: %i",error);
				cleanup(s,NULL, FALSE);
				av_free(avctx);
				goto end;
			}
			/* Convert YUV420P input into the codec's pixel format. */
			sws_ctx=sws_getContext(avctx->width,avctx->height,AV_PIX_FMT_YUV420P,
				avctx->width,avctx->height,avctx->pix_fmt,SWS_FAST_BILINEAR,NULL, NULL, NULL);
			if (sws_ctx==NULL) {
				ms_error(" sws_getContext() failed.");
				cleanup(s,avctx, FALSE);
				goto end;
			}
			jpegm=ms_yuv_buf_alloc (&yuvjpeg,avctx->width, avctx->height);
#if LIBSWSCALE_VERSION_INT >= AV_VERSION_INT(0,9,0)
			if (sws_scale(sws_ctx,(const uint8_t *const*)yuvbuf.planes,yuvbuf.strides,0,avctx->height,yuvjpeg.planes,yuvjpeg.strides)<0){
#else
			if (sws_scale(sws_ctx,(uint8_t **)yuvbuf.planes,yuvbuf.strides,0,avctx->height,yuvjpeg.planes,yuvjpeg.strides)<0){
#endif
				ms_error("sws_scale() failed.");
				sws_freeContext(sws_ctx);
				cleanup(s,avctx, FALSE);
				freemsg(jpegm);
				goto end;
			}
			sws_freeContext(sws_ctx);

			av_frame_unref(s->pict);
			avpicture_fill((AVPicture*)s->pict,(uint8_t*)jpegm->b_rptr,avctx->pix_fmt,avctx->width,avctx->height);
			packet.data=comp_buf;
			packet.size=comp_buf_sz;
			error=avcodec_encode_video2(avctx, &packet, s->pict, &got_pict);
			if (error<0){
				ms_error("Could not encode jpeg picture.");
			}else{
				if (fwrite(comp_buf,packet.size,1,s->file)>0){
					ms_message("Snapshot done");
				}else{
					ms_error("Error writing snapshot.");
				}
			}
			ms_free(comp_buf);
			/* TRUE: snapshot attempt finished; close the file. */
			cleanup(s,avctx, TRUE);
			freemsg(jpegm);
		}
		goto end;
	}
	end:
	ms_filter_unlock(f);
	/* Discard all pending input frames regardless of snapshot state. */
	ms_queue_flush(f->inputs[0]);
}

/* Filter method table: only the take-snapshot control call. */
static MSFilterMethod jpg_methods[]={
	{	MS_JPEG_WRITER_TAKE_SNAPSHOT, take_snapshot },
	{	0,NULL}
};

#ifndef _MSC_VER

MSFilterDesc ms_jpeg_writer_desc={
	.id=MS_JPEG_WRITER_ID,
	.name="MSJpegWriter",
	.text="Take a video snapshot as jpg file",
	.category=MS_FILTER_OTHER,
	.ninputs=1,
	.noutputs=0,
	.init=jpg_init,
	.process=jpg_process,
	.uninit=jpg_uninit,
	.methods=jpg_methods
};

#else

/* MSVC build: positional initialization (no designated initializers). */
MSFilterDesc ms_jpeg_writer_desc={
	MS_JPEG_WRITER_ID,
	"MSJpegWriter",
	"Take a video snapshot as jpg file",
	MS_FILTER_OTHER,
	NULL,
	1,
	0,
	jpg_init,
	NULL,
	jpg_process,
	NULL,
	jpg_uninit,
	jpg_methods
};

#endif

MS_FILTER_DESC_EXPORT(ms_jpeg_writer_desc)
/*
 * tcp_input_data() calls this routine for all packet destined to a
 * connection to the SSL port, when the SSL kernel proxy is configured
 * to intercept and process those packets.
 * A packet may carry multiple SSL records, so the function
 * calls kssl_input() in a loop, until all records are
 * handled.
 * As long as this connection is in handshake, that is until the first
 * time kssl_input() returns a record to be delivered ustreams,
 * we maintain the tcp_kssl_inhandshake, and keep an extra reference on
 * the tcp/connp across the call to kssl_input(). The reason is, that
 * function may return KSSL_CMD_QUEUED after scheduling an asynchronous
 * request and cause tcp_kssl_callback() to be called on a different CPU,
 * which could decrement the conn/tcp reference before we get to increment it.
 */
void
tcp_kssl_input(tcp_t *tcp, mblk_t *mp, cred_t *cr)
{
	struct conn_s	*connp = tcp->tcp_connp;
	tcp_t		*listener;
	mblk_t		*ind_mp;
	kssl_cmd_t	kssl_cmd;
	mblk_t		*outmp;
	struct		T_conn_ind *tci;
	boolean_t	more = B_FALSE;
	boolean_t	conn_held = B_FALSE;
	boolean_t	is_v4;
	void		*addr;

	if (is_system_labeled() && mp != NULL) {
		ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
		/*
		 * Provide for protocols above TCP such as RPC. NOPID leaves
		 * db_cpid unchanged.
		 * The cred could have already been set.
		 */
		if (cr != NULL)
			mblk_setcred(mp, cr, NOPID);
	}

	/* First time here, allocate the SSL context */
	if (tcp->tcp_kssl_ctx == NULL) {
		ASSERT(tcp->tcp_kssl_pending);

		is_v4 = (connp->conn_ipversion == IPV4_VERSION);
		if (is_v4) {
			addr = &connp->conn_faddr_v4;
		} else {
			addr = &connp->conn_faddr_v6;
		}

		if (kssl_init_context(tcp->tcp_kssl_ent,
		    addr, is_v4, tcp->tcp_mss,
		    &(tcp->tcp_kssl_ctx)) != KSSL_STS_OK) {
			/* Context init failed: fall back to userland SSL. */
			tcp->tcp_kssl_pending = B_FALSE;
			kssl_release_ent(tcp->tcp_kssl_ent, NULL,
			    KSSL_NO_PROXY);
			tcp->tcp_kssl_ent = NULL;
			goto no_can_do;
		}
		tcp->tcp_kssl_inhandshake = B_TRUE;

		/* we won't be needing this one after now */
		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
		tcp->tcp_kssl_ent = NULL;
	}

	/* Hold the conn across kssl_input() — see block comment above. */
	if (tcp->tcp_kssl_inhandshake) {
		CONN_INC_REF(connp);
		conn_held = B_TRUE;
	}

	do {
		kssl_cmd = kssl_input(tcp->tcp_kssl_ctx, mp, &outmp,
		    &more, tcp_kssl_input_callback, (void *)tcp);

		switch (kssl_cmd) {
		case KSSL_CMD_SEND:
			DTRACE_PROBE(kssl_cmd_send);
			/*
			 * We need to increment tcp_squeue_bytes to account
			 * for the extra bytes internally injected to the
			 * outgoing flow. tcp_output() will decrement it
			 * as they are sent out.
			 */
			mutex_enter(&tcp->tcp_non_sq_lock);
			tcp->tcp_squeue_bytes += msgdsize(outmp);
			mutex_exit(&tcp->tcp_non_sq_lock);
			tcp_output(connp, outmp, NULL, NULL);

		/* FALLTHROUGH */
		case KSSL_CMD_NONE:
			DTRACE_PROBE(kssl_cmd_none);
			if (tcp->tcp_kssl_pending) {
				mblk_t *ctxmp;

				/*
				 * SSL handshake successfully started -
				 * pass up the T_CONN_IND
				 */

				mp = NULL;

				listener = tcp->tcp_listener;
				tcp->tcp_kssl_pending = B_FALSE;

				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				ctxmp = allocb(sizeof (kssl_ctx_t), BPRI_MED);

				/*
				 * Give this session a chance to fall back to
				 * userland SSL
				 */
				if (ctxmp == NULL)
					goto no_can_do;

				/*
				 * attach the kssl_ctx to the conn_ind and
				 * transform it to a T_SSL_PROXY_CONN_IND.
				 * Hold it so that it stays valid till it
				 * reaches the stream head.
				 */
				kssl_hold_ctx(tcp->tcp_kssl_ctx);
				*((kssl_ctx_t *)ctxmp->b_rptr) =
				    tcp->tcp_kssl_ctx;
				ctxmp->b_wptr = ctxmp->b_rptr +
				    sizeof (kssl_ctx_t);

				ind_mp->b_cont = ctxmp;

				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from tcp_input_data
				 * delivering the T_CONN_IND on a TCPS_SYN_RCVD,
				 * and all conn ref cnt comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;

				CONN_INC_REF(connp);

				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL, SQ_FILL,
					    SQTAG_TCP_CONN_IND);
				}
			}
			break;

		case KSSL_CMD_QUEUED:
			DTRACE_PROBE(kssl_cmd_queued);
			/*
			 * We hold the conn_t here because an asynchronous
			 * request have been queued and
			 * tcp_kssl_input_callback() will be called later.
			 * It will release the conn_t
			 */
			CONN_INC_REF(connp);
			break;

		case KSSL_CMD_DELIVER_PROXY:
		case KSSL_CMD_DELIVER_SSL:
			DTRACE_PROBE(kssl_cmd_proxy__ssl);
			/*
			 * Keep accumulating if not yet accepted.
			 */
			if (tcp->tcp_listener != NULL) {
				DTRACE_PROBE1(kssl_mblk__input_rcv_enqueue,
				    mblk_t *, outmp);
				tcp_rcv_enqueue(tcp, outmp, msgdsize(outmp),
				    NULL);
			} else {
				DTRACE_PROBE1(kssl_mblk__input_putnext,
				    mblk_t *, outmp);
				putnext(connp->conn_rq, outmp);
			}

			/*
			 * We're at a phase where records are sent upstreams,
			 * past the handshake
			 */
			tcp->tcp_kssl_inhandshake = B_FALSE;
			break;

		case KSSL_CMD_NOT_SUPPORTED:
			DTRACE_PROBE(kssl_cmd_not_supported);
			/*
			 * Stop the SSL processing by the proxy, and
			 * switch to the userland SSL
			 */
			if (tcp->tcp_kssl_pending) {

				tcp->tcp_kssl_pending = B_FALSE;

no_can_do:
				/* Shared fallback path; also entered via goto. */
				DTRACE_PROBE1(kssl_no_can_do, tcp_t *, tcp);
				listener = tcp->tcp_listener;
				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				if (tcp->tcp_kssl_ctx != NULL) {
					kssl_release_ctx(tcp->tcp_kssl_ctx);
					tcp->tcp_kssl_ctx = NULL;
				}

				/*
				 * Make this a T_SSL_PROXY_CONN_IND, for the
				 * stream head to deliver it to the SSL
				 * fall-back listener
				 */
				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from tcp_input_data
				 * delivering the T_CONN_IND on a TCPS_SYN_RCVD,
				 * and all conn ref cnt comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;

				CONN_INC_REF(connp);

				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL, SQ_FILL,
					    SQTAG_TCP_CONN_IND);
				}
			}
			if (mp != NULL)
				tcp_rcv_enqueue(tcp, mp, msgdsize(mp), NULL);
			break;
		}
		mp = NULL;	/* consumed on first iteration */
	} while (more);

	if (conn_held) {
		CONN_DEC_REF(connp);
	}
}
/*
 * Callback function for the cases kssl_input() had to submit an asynchronous
 * job and need to come back when done to carry on the input processing.
 * This routine follows the conventions of timeout and interrupt handlers.
 * (no blocking, ...)
 */
static void
tcp_kssl_input_callback(void *arg, mblk_t *mp, kssl_cmd_t kssl_cmd)
{
	tcp_t	*tcp = (tcp_t *)arg;
	conn_t	*connp;
	mblk_t	*sqmp;

	ASSERT(tcp != NULL);

	connp = tcp->tcp_connp;

	ASSERT(connp != NULL);

	switch (kssl_cmd) {
	case KSSL_CMD_SEND:
		/* I'm coming from an outside perimeter */
		if (mp != NULL) {
			/*
			 * See comment in tcp_kssl_input() call to tcp_output()
			 */
			mutex_enter(&tcp->tcp_non_sq_lock);
			tcp->tcp_squeue_bytes += msgdsize(mp);
			mutex_exit(&tcp->tcp_non_sq_lock);
		}
		/* Ref released by tcp_output() via the squeue. */
		CONN_INC_REF(connp);
		SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
		    NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);

	/* FALLTHROUGH */
	case KSSL_CMD_NONE:
		break;

	case KSSL_CMD_DELIVER_PROXY:
	case KSSL_CMD_DELIVER_SSL:
		/*
		 * Keep accumulating if not yet accepted.
		 */
		if (tcp->tcp_listener != NULL) {
			tcp_rcv_enqueue(tcp, mp, msgdsize(mp), NULL);
		} else {
			putnext(connp->conn_rq, mp);
		}
		break;

	case KSSL_CMD_NOT_SUPPORTED:
		/*
		 * Stop the SSL processing.
		 * NOTE(review): 'mp' is neither consumed nor freed on this
		 * path — confirm against the kssl_input() contract whether
		 * the caller owns it here.
		 */
		kssl_release_ctx(tcp->tcp_kssl_ctx);
		tcp->tcp_kssl_ctx = NULL;
	}
	/*
	 * Process any input that may have accumulated while we're waiting for
	 * the call-back.
	 * We need to re-enter the squeue for this connp, and a new mp is
	 * necessary.
	 */
	if ((sqmp = allocb(1, BPRI_MED)) != NULL) {
		CONN_INC_REF(connp);
		SQUEUE_ENTER_ONE(connp->conn_sqp, sqmp, tcp_kssl_input_asynch,
		    connp, NULL, SQ_FILL, SQTAG_TCP_KSSL_INPUT);
	} else {
		DTRACE_PROBE(kssl_err__allocb_failed);
	}
	/* Drop the reference taken when the async request was queued. */
	CONN_DEC_REF(connp);
}
/*
 * dm2s_transmit - Transmit a message.
 *
 * Sends 'mp' to the service processor mailbox identified by (target, key).
 * Returns 0 on success or when the message is deliberately dropped
 * (oversize, or retries exhausted), EIO if the mailbox is down, EAGAIN /
 * EBUSY / ENOSPC when the caller should retry (mp is put back on 'wq').
 * Caller must hold ms_lock.
 */
int
dm2s_transmit(queue_t *wq, mblk_t *mp, target_id_t target, mkey_t key)
{
	dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr;
	int ret;
	uint32_t len;
	uint32_t numsg;

	DPRINTF(DBG_DRV, ("dm2s_transmit: called\n"));
	ASSERT(dm2sp != NULL);
	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
	/*
	 * Free the message if the mailbox is not in the connected state.
	 */
	if (!DM2S_MBOX_READY(dm2sp)) {
		DPRINTF(DBG_MBOX, ("dm2s_transmit: mailbox not ready yet\n"));
		freemsg(mp);
		return (EIO);
	}

	len = msgdsize(mp);
	if (len > dm2sp->ms_mtu) {
		/*
		 * Size is too big to send, free the message.
		 * (Dropped silently: return 0, not an error.)
		 */
		DPRINTF(DBG_MBOX, ("dm2s_transmit: message too large\n"));
		DTRACE_PROBE2(dm2s_msg_too_big, dm2s_t, dm2sp,
		    uint32_t, len);
		freemsg(mp);
		return (0);
	}

	/* Build the scatter/gather list describing the mblk chain. */
	if ((ret = dm2s_prep_scatgath(mp, &numsg, dm2sp->ms_sg_tx,
	    DM2S_MAX_SG)) != 0) {
		DPRINTF(DBG_MBOX, ("dm2s_transmit: prep_scatgath failed\n"));
		putbq(wq, mp);
		return (EAGAIN);
	}
	DPRINTF(DBG_MBOX, ("dm2s_transmit: calling mb_putmsg numsg=%d len=%d\n",
	    numsg, len));
	ret = scf_mb_putmsg(target, key, len, numsg, dm2sp->ms_sg_tx, 0);
	if (ret == EBUSY || ret == ENOSPC) {
		DPRINTF(DBG_MBOX,
		    ("dm2s_transmit: mailbox busy ret=%d\n", ret));
		if (++dm2sp->ms_retries >= DM2S_MAX_RETRIES) {
			/*
			 * If maximum retries are reached, then free the
			 * message.
			 */
			DPRINTF(DBG_MBOX,
			    ("dm2s_transmit: freeing msg after max retries\n"));
			DTRACE_PROBE2(dm2s_retry_fail, dm2s_t, dm2sp,
			    int, ret);
			freemsg(mp);
			dm2sp->ms_retries = 0;
			return (0);
		}
		DTRACE_PROBE2(dm2s_mb_busy, dm2s_t, dm2sp, int, ret);
		/*
		 * Queue it back, so that we can retry again.
		 */
		putbq(wq, mp);
		return (ret);
	}
	DMPBYTES("dm2s: Putmsg: ", len, numsg, dm2sp->ms_sg_tx);
	/* Sent (or failed terminally): reset retries and release the mblk. */
	dm2sp->ms_retries = 0;
	freemsg(mp);
	DPRINTF(DBG_DRV, ("dm2s_transmit: ret=%d\n", ret));
	return (ret);
}
/*
 * Return SNMP stuff in buffer in mpdata.
 *
 * Walks every TCP conn_t in the caller's zone and builds the MIB-II fixed
 * counters plus four tables (IPv4 connections, IPv4 MLP attributes, IPv6
 * connections, IPv6 MLP attributes), each replied on 'q' as a separate
 * control message.  Returns a copy of the original mpctl for the caller to
 * continue with, or NULL on allocation failure.
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t		ispriv;
	zoneid_t		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;
	size_t			tcp_mib_size, tce_size, tce6_size;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		/* Any copy failed: free whatever was allocated and bail. */
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	/* Legacy requests get the truncated (pre-64-bit) structure sizes. */
	if (legacy_req) {
		tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
		tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
		tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
	} else {
		tcp_mib_size = sizeof (mib2_tcp_t);
		tce_size = sizeof (mib2_tcpConnEntry_t);
		tce6_size = sizeof (mib2_tcp6ConnEntry_t);
	}

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);   /* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			/* Harvest and reset per-conn segment counters. */
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			/* Collect MLP/MAC label attributes for this conn. */
			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
			tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
			tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
			tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
			tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
				tce6.tcp6ConnIfIndex =
				    connp->conn_ixa->ixa_scopeid;
			} else {
				tce6.tcp6ConnIfIndex = connp->conn_bound_if;
			}
			/* Don't want just anybody seeing these... */
			if (ispriv) {
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt;
				tce6.tcp6ConnEntryInfo.ce_suna =
				    tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt;
				tce6.tcp6ConnEntryInfo.ce_rack =
				    tcp->tcp_rack;
			} else {
				/*
				 * Netstat, unfortunately, uses this to
				 * get send/receive queue sizes. How to fix?
				 * Why not compute the difference only?
				 */
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt - tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_suna = 0;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt - tcp->tcp_rack;
				tce6.tcp6ConnEntryInfo.ce_rack = 0;
			}

			tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
			tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
			tce6.tcp6ConnEntryInfo.ce_rto =  tcp->tcp_rto;
			tce6.tcp6ConnEntryInfo.ce_mss =  tcp->tcp_mss;
			tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

			tce6.tcp6ConnCreationProcess =
			    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
			    connp->conn_cpid;
			tce6.tcp6ConnCreationTime = connp->conn_open_time;

			(void) snmp_append_data2(mp6_conn_ctl->b_cont,
			    &mp6_conn_tail, (char *)&tce6, tce6_size);

			mlp.tme_connidx = v6_conn_idx++;
			if (needattr)
				(void) snmp_append_data2(mp6_attr_ctl->b_cont,
				    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes. How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto =  tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss =  tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime =
				    connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, tce_size);

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
	SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);

	/*
	 * Synchronize 32- and 64-bit counters. Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP. The new 64 bits
	 * counters are used. Hence the old counters' values in tcp_sc_mib
	 * are always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);	/* no attributes collected */
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}
/*
 * MTP_UNITDATA_IND 20 - Connection-less data receive indication
 * -------------------------------------------------------------------------
 * N_UNITDATA_IND (MTP_TRANSFER_IND)
 * -------------------------------------------------------------------------
 * This covers only the MTP-TRANSFER-Indication primitive.
 *
 * Let me ask a question here: why dont we just pass the unitdata on to the
 * ASP and let the ASP translate it into an M3UA message. That way, if there
 * are different ASPs supporting different versions, that can be handled at
 * the ASP instead of here. Also, we don't know the transport type. If it
 * is an SCTP transport, it can do other things with the message, like select
 * stream.
 *
 * Builds an M3UA DATA message header (common header + NA + RC + RL + DATA
 * parameter headers) in a fresh mblk, chains the user data behind it, and
 * queues it toward the selected link's write queue (or the SLS buffer).
 * Returns 0 on success, -EBUSY for backpressure, -ENOBUFS on allocation
 * failure, -EFAULT on inconsistent state.
 */
static int
mtpp_unitdata_ind(queue_t *q, mblk_t *msg)
{
	sls_t *sls;
	queue_t *wq;
	mtpp_t *mtp = Q_MTP(q);
	mblk_t *mp, *db = msg->b_cont;
	size_t dlen = msgdsize(db);
	/* FIX: cast must match the declared indication type, not the req. */
	N_unitdata_ind_t *p = (N_unitdata_ind_t *) msg->b_rptr;
	/* FIX: "mtp_rt" was a typo for the routing-label struct. */
	struct mtp_rl *rl = (struct mtp_rl *) (((caddr_t) p) + p->SRC_offset);
	static const size_t mlen =
	    M3UA_MHDR_SIZE + M3UA_PARM_SIZE_RC + M3UA_PARM_SIZE_RL + M3UA_PHDR_SIZE;

	/*
	 * First let's find out where the data is going.  The AS should have
	 * this all set up for us in the SLS tables.
	 */
	ensure(mtp, return (-EFAULT));
	ensure(mtp->rc, return (-EFAULT));
	ensure(mtp->rc->as, return (-EFAULT));
	sls = &mtp->rc->as->sls[(rl->sls & UA_SLS_MASK)];
	ensure(sls->sp, return (-EFAULT));
	ensure(sls->sp->lp, return (-EFAULT));
	ensure(sls->sp->lp->q, return (-EFAULT));
	if (!(sls->flags & UA_SLS_BUFFERING))
		if (!(canput((wq = WR(sls->sp->lp->q)))))
			return (-EBUSY);	/* apply backpressure! */
	if ((mp = allocb(mlen, BPRI_MED))) {
		/*
		 * FIX: the original used the removed GCC "cast as lvalue"
		 * extension (*((uint32_t *) mp->b_wptr)++), which is not
		 * valid C.  Use typed cursors and advance b_wptr at the end.
		 */
		uint32_t *w;
		uint8_t *b;

		mp->b_datap->db_type = M_DATA;
		w = (uint32_t *) mp->b_wptr;
		*w++ = M3UA_MAUP_DATA;
		*w++ = htonl(mlen + dlen);
		*w++ = M3UA_PARM_NA;
		*w++ = htonl(mtp->na);
		*w++ = M3UA_PARM_RC;
		/* NOTE(review): mtp->rc is dereferenced as a pointer above —
		 * confirm this is the intended routing-context value. */
		*w++ = htonl(mtp->rc);
		/*
		 * A couple of big arguments on what should be in the
		 * messages here...
		 */
		*w++ = M3UA_PARM_RL;
		*w++ = htonl(rl->opc);	/* FIX: "hotnl" -> htonl */
		*w++ = htonl(rl->dpc);	/* FIX: "hotnl" -> htonl */
		b = (uint8_t *) w;
		*b++ = 0;
		/*
		 * FIX: single-byte fields must not go through htonl(); the
		 * truncation of the swapped word would store 0 on
		 * little-endian hosts.
		 */
		*b++ = rl->sls;
		*b++ = rl->ni;
		*b++ = rl->mp;
		w = (uint32_t *) b;
		*w++ = M3UA_PARM_DATA;
		mp->b_wptr = (unsigned char *) w;
		mp->b_cont = db;	/* user data rides behind the header */
		freeb(msg);		/* free only the N_UNITDATA_IND block */
		if (sls->flags & UA_SLS_BUFFERING)
			/* hold back data for this sls */
			bufq_queue(&sls->buf, mp);
		else
			putq(wq, mp);
		return (0);
	}
	return (-ENOBUFS);	/* try again later */
}
/*
 * Build and transmit an ASCONF chunk from the parameter list in 'asc'.
 *
 * A chunk header (+ serial number + address-parameter TLV space) is
 * prepended to asc->head, the result is pulled up into one contiguous
 * mblk, stashed with the clustering info, queued on sctp_cxmit_list, and
 * transmission is kicked off.  Returns 0 or ENOMEM.
 */
static int
sctp_asconf_send(sctp_t *sctp, sctp_asconf_t *asc, sctp_faddr_t *fp,
    sctp_cl_ainfo_t *ainfo)
{
	mblk_t *mp, *nmp;
	sctp_chunk_hdr_t *ch;
	boolean_t isv4;
	size_t msgsize;

	ASSERT(asc != NULL && asc->head != NULL);

	/* Address family follows the destination (or current) peer addr. */
	isv4 = (fp != NULL) ? fp->isv4 : sctp->sctp_current->isv4;

	/* SCTP chunk header + Serial Number + Address Param TLV */
	msgsize = sizeof (*ch) + sizeof (uint32_t) +
	    (isv4 ? PARM_ADDR4_LEN : PARM_ADDR6_LEN);

	mp = allocb(msgsize, BPRI_MED);
	if (mp == NULL)
		return (ENOMEM);

	mp->b_wptr += msgsize;
	mp->b_cont = asc->head;

	ch = (sctp_chunk_hdr_t *)mp->b_rptr;
	ch->sch_id = CHUNK_ASCONF;
	ch->sch_flags = 0;
	/* Length covers the whole chain (header block + parameters). */
	ch->sch_len = htons(msgdsize(mp));

	/* Flatten the chain into a single contiguous mblk. */
	nmp = msgpullup(mp, -1);
	if (nmp == NULL) {
		freeb(mp);
		return (ENOMEM);
	}

	/*
	 * Stash the address list and the count so that when the operation
	 * completes, i.e. when as get an ACK, we can update the clustering's
	 * state for this association.
	 */
	if (ainfo != NULL) {
		ASSERT(cl_sctp_assoc_change != NULL);
		ASSERT(nmp->b_prev == NULL);
		nmp->b_prev = (mblk_t *)ainfo;
	}
	/* Clean up the temporary mblk chain */
	freemsg(mp);
	asc->head = NULL;
	asc->cid = 0;

	/* Queue it ... */
	if (sctp->sctp_cxmit_list == NULL) {
		sctp->sctp_cxmit_list = nmp;
	} else {
		linkb(sctp->sctp_cxmit_list, nmp);
	}

	BUMP_LOCAL(sctp->sctp_obchunks);

	/* And try to send it. */
	sctp_wput_asconf(sctp, fp);

	return (0);
}