Groupsock* ProxyServerMediaSession::createGroupsock(struct in_addr const& addr, Port port) {
  // Default implementation; may be redefined by subclasses:
  return new Groupsock(envir(), addr, port, 255);
}
RTCPInstance* ProxyServerMediaSession
::createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
             unsigned char const* cname, RTPSink* sink) {
  // Default implementation; may be redefined by subclasses:
  return RTCPInstance::createNew(envir(), RTCPgs, totSessionBW, cname, sink, NULL/*we're a server*/);
}
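Because both factory methods are virtual, a subclass can substitute its own socket or RTCP parameters without touching the rest of the proxy logic. The following is only a sketch of that pattern; the subclass name and the TTL value of 1 are assumptions for illustration, not part of the library:

// Hypothetical ProxyServerMediaSession subclass that relays with a smaller TTL.
class LowTTLProxyServerMediaSession: public ProxyServerMediaSession {
protected:
  // (constructor and createNew() omitted; they would simply forward to the base class)
  virtual Groupsock* createGroupsock(struct in_addr const& addr, Port port) {
    // Same as the default implementation, but with TTL 1 instead of 255:
    return new Groupsock(envir(), addr, port, 1);
  }
  virtual RTCPInstance* createRTCP(Groupsock* RTCPgs, unsigned totSessionBW,
                                   unsigned char const* cname, RTPSink* sink) {
    // Keep the default RTCP behaviour:
    return RTCPInstance::createNew(envir(), RTCPgs, totSessionBW, cname, sink, NULL/*we're a server*/);
  }
};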
void H264FrameSource::doGetNextFrame() {
  // Compute the inter-frame wait time from the frame rate:
  double delay = 1000.0 / videoFPS; // ms
  int to_delay = delay * 1000; // us

  if (!m_videoInput) return;

  BYTE *pic = m_videoInput->GrabFrame();
  // Check the picture:
  if (!pic) {
    fFrameSize = 0;
    m_started = 0;
    return;
  }

  // Check whether we need to send an intra frame:
  if (sendFPU) {
    videoEncoder->FastPictureUpdate();
  }

  //if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
  //  // This is the first frame, so use the current time:
  //} else {
  //  // Increment by the play time of the previous data:
  //  unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
  //  fPresentationTime.tv_sec += uSeconds/1000000;
  //  fPresentationTime.tv_usec = uSeconds%1000000;
  //}
  // Remember the play time of this data:
  //fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
  //fDurationInMicroseconds = fLastPlayTime;
  //fDurationInMicroseconds = 1000.0 / videoFPS;

  VideoFrame *videoFrame = videoEncoder->EncodeFrame(pic, m_videoInput->GetBufferSize());
  // If encoding failed:
  if (!videoFrame) {
    // Try again on the next scheduled pass:
    fFrameSize = 0;
    m_started = 0;
    Log("-----Error encoding video\n");
    double delay = 1000.0 / videoFPS;
    int to_delay = delay * 1000; // us
    nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
        (TaskFunc*)FramedSource::afterGetting, this);
    return;
  }

  if (sendFPU) sendFPU = false;

  // Set the frame timestamp:
  videoFrame->SetTimestamp(getDifTime(&first)/1000);
  // Set sending time of previous frame:
  //getUpdDifTime(&prev);
  //gettimeofday(&fPresentationTime, 0);

  fFrameSize = videoFrame->GetLength();
  // Check against fMaxSize *before* copying, so we never overrun the destination buffer:
  if (fFrameSize > fMaxSize) {
    fNumTruncatedBytes = fFrameSize - fMaxSize;
    fFrameSize = fMaxSize;
  } else {
    fNumTruncatedBytes = 0;
  }
  memmove(fTo, videoFrame->GetData(), fFrameSize);
  gettimeofday(&fPresentationTime, NULL);

  //to_delay = ((1000 / videoFPS) * fFrameSize / RTPPAYLOADSIZE) * 1000; // us
  nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
      (TaskFunc*)FramedSource::afterGetting, this);
}
Boolean ADUFromMP3Source::doGetNextFrame1() { // First, check whether we have enough previously-read data to output an // ADU for the last-read MP3 frame: unsigned tailIndex; Segment *tailSeg; Boolean needMoreData; if (fSegments->isEmpty()) { needMoreData = True; tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings } else { tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex()); tailSeg = &(fSegments->s[tailIndex]); needMoreData = fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far || tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data } if (needMoreData) { // We don't have enough data to output an ADU from the last-read MP3 // frame, so need to read another one and try again: doGetNextFrame(); return True; } // Output an ADU from the tail segment: fFrameSize = tailSeg->headerSize + tailSeg->sideInfoSize + tailSeg->aduSize; fPresentationTime = tailSeg->presentationTime; fDurationInMicroseconds = tailSeg->durationInMicroseconds; unsigned descriptorSize = fIncludeADUdescriptors ? ADUdescriptor::computeSize(fFrameSize) : 0; #ifdef DEBUG fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize); #endif if (descriptorSize + fFrameSize > fMaxSize) { envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room (" << descriptorSize + fFrameSize << ">" << fMaxSize << ")\n"; fFrameSize = 0; return False; } unsigned char *toPtr = fTo; // output the ADU descriptor: if (fIncludeADUdescriptors) { fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize); } // output header and side info: memmove(toPtr, tailSeg->dataStart(), tailSeg->headerSize + tailSeg->sideInfoSize); toPtr += tailSeg->headerSize + tailSeg->sideInfoSize; // go back to the frame that contains the start of our data: unsigned offset = 0; unsigned i = tailIndex; unsigned prevBytes = tailSeg->backpointer; while (prevBytes > 0) { i = SegmentQueue::prevIndex(i); unsigned dataHere = fSegments->s[i].dataHere(); if (dataHere < prevBytes) { prevBytes -= dataHere; } else { offset = dataHere - prevBytes; break; } } // dequeue any segments that we no longer need: while (fSegments->headIndex() != i) { fSegments->dequeue(); // we're done with it } unsigned bytesToUse = tailSeg->aduSize; while (bytesToUse > 0) { Segment &seg = fSegments->s[i]; unsigned char *fromPtr = &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset]; unsigned dataHere = seg.dataHere() - offset; unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse; memmove(toPtr, fromPtr, bytesUsedHere); bytesToUse -= bytesUsedHere; toPtr += bytesUsedHere; offset = 0; i = SegmentQueue::nextIndex(i); } if (fFrameCounter++ % fScale == 0) { // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // Don't use this frame; get another one: doGetNextFrame(); } return True; }
// Note: We should change the following to use asynchronous file reading, ##### // as we now do with ByteStreamFileSource. ##### void AMRAudioFileDescriptorSource::doGetNextFrame() { if (feof(fFid) || ferror(fFid)) { handleClosure(this); return; } // Begin by reading the 1-byte frame header (and checking it for validity) while (1) { if (fread(&fLastFrameHeader, 1, 1, fFid) < 1) { handleClosure(this); return; } if ((fLastFrameHeader&0x83) != 0) { } else { unsigned char ft = (fLastFrameHeader&0x78)>>3; fFrameSize = fIsWideband ? frameSizeWideband[ft] : frameSize[ft]; if (fFrameSize == FT_INVALID) { } else { break; } } } // Next, read the frame-block into the buffer provided: fFrameSize *= fNumChannels; // because multiple channels make up a frame-block if (fFrameSize > fMaxSize) { fNumTruncatedBytes = fFrameSize - fMaxSize; fFrameSize = fMaxSize; } fFrameSize = fread(fTo, 1, fFrameSize, fFid); // Set the 'presentation time': if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) { // This is the first frame, so use the current time: gettimeofday(&fPresentationTime, NULL); } else { // Increment by the play time of the previous frame (20 ms) unsigned uSeconds = fPresentationTime.tv_usec + 20000; fPresentationTime.tv_sec += uSeconds/1000000; fPresentationTime.tv_usec = uSeconds%1000000; } fDurationInMicroseconds = 20000; // each frame is 20 ms // Switch to another task, and inform the reader that he has data: nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); }
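The masks 0x83 and 0x78 used above come from the one-byte AMR frame header of the storage format (RFC 4867): the top bit and the two low bits are padding that must be zero, bits 6-3 carry the frame type (FT) that indexes the frame-size tables, and bit 2 is the frame-quality indicator. The small standalone decoder below exists purely to illustrate those masks; the struct and function names are ours, not the library's:

#include <cstdint>

// Illustrative decode of one AMR frame-header (ToC) octet, mirroring the
// masks used in doGetNextFrame() above.
struct AmrTocByte {
  bool    valid;     // the padding bits (mask 0x83) must all be zero
  uint8_t frameType; // FT, index into frameSize[] / frameSizeWideband[]
  bool    qualityOk; // Q (frame quality indicator)
};

static AmrTocByte decodeAmrToc(uint8_t header) {
  AmrTocByte toc;
  toc.valid     = (header & 0x83) == 0;  // top padding bit and the two low padding bits
  toc.frameType = (header & 0x78) >> 3;  // 4-bit frame type
  toc.qualityOk = (header & 0x04) != 0;  // Q bit
  return toc;
}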
void OnDemandServerMediaSubsession ::getStreamParameters(unsigned clientSessionId, netAddressBits clientAddress, Port const& clientRTPPort, Port const& clientRTCPPort, int tcpSocketNum, unsigned char rtpChannelId, unsigned char rtcpChannelId, netAddressBits& destinationAddress, u_int8_t& /*destinationTTL*/, Boolean& isMulticast, Port& serverRTPPort, Port& serverRTCPPort, void*& streamToken) { if (destinationAddress == 0) destinationAddress = clientAddress; struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress; isMulticast = False; if (fLastStreamToken != NULL && fReuseFirstSource) { // Special case: Rather than creating a new 'StreamState', // we reuse the one that we've already created: serverRTPPort = ((StreamState*)fLastStreamToken)->serverRTPPort(); serverRTCPPort = ((StreamState*)fLastStreamToken)->serverRTCPPort(); ++((StreamState*)fLastStreamToken)->referenceCount(); streamToken = fLastStreamToken; } else { // Normal case: Create a new media source: unsigned streamBitrate; FramedSource* mediaSource = createNewStreamSource(clientSessionId, streamBitrate); // Create 'groupsock' and 'sink' objects for the destination, // using previously unused server port numbers: RTPSink* rtpSink = NULL; BasicUDPSink* udpSink = NULL; Groupsock* rtpGroupsock = NULL; Groupsock* rtcpGroupsock = NULL; if (clientRTPPort.num() != 0 || tcpSocketNum >= 0) { // Normal case: Create destinations portNumBits serverPortNum; if (clientRTCPPort.num() == 0) { // We're streaming raw UDP (not RTP). Create a single groupsock: NoReuse dummy(envir()); // ensures that we skip over ports that are already in use for (serverPortNum = fInitialPortNum; ; ++serverPortNum) { struct in_addr dummyAddr; dummyAddr.s_addr = 0; serverRTPPort = serverPortNum; rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255); if (rtpGroupsock->socketNum() >= 0) break; // success } udpSink = BasicUDPSink::createNew(envir(), rtpGroupsock); } else { // Normal case: We're streaming RTP (over UDP or TCP). Create a pair of // groupsocks (RTP and RTCP), with adjacent port numbers (RTP port number even): NoReuse dummy(envir()); // ensures that we skip over ports that are already in use for (portNumBits serverPortNum = fInitialPortNum; ; serverPortNum += 2) { struct in_addr dummyAddr; dummyAddr.s_addr = 0; serverRTPPort = serverPortNum; rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255); if (rtpGroupsock->socketNum() < 0) { delete rtpGroupsock; continue; // try again } serverRTCPPort = serverPortNum+1; rtcpGroupsock = new Groupsock(envir(), dummyAddr, serverRTCPPort, 255); if (rtcpGroupsock->socketNum() < 0) { delete rtpGroupsock; delete rtcpGroupsock; continue; // try again } break; // success } unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic rtpSink = createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource); if (rtpSink != NULL && rtpSink->estimatedBitrate() > 0) streamBitrate = rtpSink->estimatedBitrate(); } // Turn off the destinations for each groupsock. 
They'll get set later // (unless TCP is used instead): if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations(); if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations(); if (rtpGroupsock != NULL) { // Try to use a big send buffer for RTP - at least 0.1 second of // specified bandwidth and at least 50 KB unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024; increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize); } } // Set up the state of the stream. The stream will get started later: streamToken = fLastStreamToken = new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink, streamBitrate, mediaSource, rtpGroupsock, rtcpGroupsock); } // Record these destinations as being for this client session id: Destinations* destinations; if (tcpSocketNum < 0) { // UDP destinations = new Destinations(destinationAddr, clientRTPPort, clientRTCPPort); } else { // TCP destinations = new Destinations(tcpSocketNum, rtpChannelId, rtcpChannelId); } fDestinationsHashTable->Add((char const*)clientSessionId, destinations); }
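getStreamParameters() relies on the two virtual factories, createNewStreamSource() and createNewRTPSink(), that each concrete subsession supplies (several real implementations appear later in this section). For orientation, a minimal hypothetical subsession serving an MP3 file might look like the sketch below; the class name and bitrate estimate are placeholders, and cleanup code is omitted:

// Hypothetical on-demand subsession for one MPEG-1/2 audio file.
class MyMP3Subsession: public OnDemandServerMediaSubsession {
public:
  MyMP3Subsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource)
    : OnDemandServerMediaSubsession(env, reuseFirstSource), fFileName(strDup(fileName)) {}

protected:
  virtual FramedSource* createNewStreamSource(unsigned /*clientSessionId*/,
                                              unsigned& estBitrate) {
    estBitrate = 128; // kbps, estimate
    return MP3FileSource::createNew(envir(), fFileName);
  }
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char /*rtpPayloadTypeIfDynamic*/,
                                    FramedSource* /*inputSource*/) {
    return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
  }

private:
  char const* fFileName;
};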
char* ServerMediaSession::generateSDPDescription() { AddressString ipAddressStr(ourIPAddress(envir())); unsigned ipAddressStrSize = strlen(ipAddressStr.val()); // For a SSM sessions, we need a "a=source-filter: incl ..." line also: char* sourceFilterLine; if (fIsSSM) { char const* const sourceFilterFmt = "a=source-filter: incl IN IP4 * %s\r\n" "a=rtcp-unicast: reflection\r\n"; unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1; sourceFilterLine = new char[sourceFilterFmtSize]; sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val()); } else { sourceFilterLine = strDup(""); } char* rangeLine = NULL; // for now char* sdp = NULL; // for now do { // Count the lengths of each subsession's media-level SDP lines. // (We do this first, because the call to "subsession->sdpLines()" // causes correct subsession 'duration()'s to be calculated later.) unsigned sdpLength = 0; ServerMediaSubsession* subsession; for (subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { char const* sdpLines = subsession->sdpLines(); if (sdpLines == NULL) continue; // the media's not available sdpLength += strlen(sdpLines); } if (sdpLength == 0) break; // the session has no usable subsessions // Unless subsessions have differing durations, we also have a "a=range:" line: float dur = duration(); if (dur == 0.0) { rangeLine = strDup("a=range:npt=0-\r\n"); } else if (dur > 0.0) { char buf[100]; sprintf(buf, "a=range:npt=0-%.3f\r\n", dur); rangeLine = strDup(buf); } else { // subsessions have differing durations, so "a=range:" lines go there rangeLine = strDup(""); } char const* const sdpPrefixFmt = "v=0\r\n" "o=- %ld%06ld %d IN IP4 %s\r\n" "s=%s\r\n" "i=%s\r\n" "t=0 0\r\n" "a=tool:%s%s\r\n" "a=type:broadcast\r\n" "a=control:*\r\n" "%s" "%s" "a=x-qt-text-nam:%s\r\n" "a=x-qt-text-inf:%s\r\n" "%s"; sdpLength += strlen(sdpPrefixFmt) + 20 + 6 + 20 + ipAddressStrSize + strlen(fDescriptionSDPString) + strlen(fInfoSDPString) + strlen(libNameStr) + strlen(libVersionStr) + strlen(sourceFilterLine) + strlen(rangeLine) + strlen(fDescriptionSDPString) + strlen(fInfoSDPString) + strlen(fMiscSDPLines); sdp = new char[sdpLength]; if (sdp == NULL) break; // Generate the SDP prefix (session-level lines): sprintf(sdp, sdpPrefixFmt, fCreationTime.tv_sec, fCreationTime.tv_usec, // o= <session id> 1, // o= <version> // (needs to change if params are modified) ipAddressStr.val(), // o= <address> fDescriptionSDPString, // s= <description> fInfoSDPString, // i= <info> libNameStr, libVersionStr, // a=tool: sourceFilterLine, // a=source-filter: incl (if a SSM session) rangeLine, // a=range: line fDescriptionSDPString, // a=x-qt-text-nam: line fInfoSDPString, // a=x-qt-text-inf: line fMiscSDPLines); // miscellaneous session SDP lines (if any) // Then, add the (media-level) lines for each subsession: char* mediaSDP = sdp; for (subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { mediaSDP += strlen(mediaSDP); char const* sdpLines = subsession->sdpLines(); if (sdpLines != NULL) sprintf(mediaSDP, "%s", sdpLines); } } while (0); delete[] rangeLine; delete[] sourceFilterLine; return sdp; }
void H265VideoFileServerMediaSubsession::afterPlayingDummy1() {
  // Unschedule any pending 'checking' task:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  // Signal the event loop that we're done:
  setDoneFlag();
}
FramedSource* AMRAudioFileServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  estBitrate = 10; // kbps, estimate
  return AMRAudioFileSource::createNew(envir(), fFileName);
}
void H264or5Fragmenter::doGetNextFrame() { if (fNumValidDataBytes == 1) { // We have no NAL unit data currently in the buffer. Read a new one: fInputSource->getNextFrame(&fInputBuffer[1], fInputBufferSize - 1, afterGettingFrame, this, FramedSource::handleClosure, this); } else { // We have NAL unit data in the buffer. There are three cases to consider: // 1. There is a new NAL unit in the buffer, and it's small enough to deliver // to the RTP sink (as is). // 2. There is a new NAL unit in the buffer, but it's too large to deliver to // the RTP sink in its entirety. Deliver the first fragment of this data, // as a FU packet, with one extra preceding header byte (for the "FU header"). // 3. There is a NAL unit in the buffer, and we've already delivered some // fragment(s) of this. Deliver the next fragment of this data, // as a FU packet, with two (H.264) or three (H.265) extra preceding header bytes // (for the "NAL header" and the "FU header"). if (fMaxSize < fMaxOutputPacketSize) { // shouldn't happen envir() << "H264or5Fragmenter::doGetNextFrame(): fMaxSize (" << fMaxSize << ") is smaller than expected\n"; } else { fMaxSize = fMaxOutputPacketSize; } fLastFragmentCompletedNALUnit = True; // by default if (fCurDataOffset == 1) { // case 1 or 2 if (fNumValidDataBytes - 1 <= fMaxSize) { // case 1 memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1); fFrameSize = fNumValidDataBytes - 1; fCurDataOffset = fNumValidDataBytes; } else { // case 2 // We need to send the NAL unit data as FU packets. Deliver the first // packet now. Note that we add "NAL header" and "FU header" bytes to the front // of the packet (overwriting the existing "NAL header"). if (fHNumber == 264) { fInputBuffer[0] = (fInputBuffer[1] & 0xE0) | 28; // FU indicator fInputBuffer[1] = 0x80 | (fInputBuffer[1] & 0x1F); // FU header (with S bit) } else { // 265 u_int8_t nal_unit_type = (fInputBuffer[1]&0x7E)>>1; fInputBuffer[0] = (fInputBuffer[1] & 0x81) | (49<<1); // Payload header (1st byte) fInputBuffer[1] = fInputBuffer[2]; // Payload header (2nd byte) fInputBuffer[2] = 0x80 | nal_unit_type; // FU header (with S bit) } memmove(fTo, fInputBuffer, fMaxSize); fFrameSize = fMaxSize; fCurDataOffset += fMaxSize - 1; fLastFragmentCompletedNALUnit = False; } } else { // case 3 // We are sending this NAL unit data as FU packets. We've already sent the // first packet (fragment). Now, send the next fragment. Note that we add // "NAL header" and "FU header" bytes to the front. (We reuse these bytes that // we already sent for the first fragment, but clear the S bit, and add the E // bit if this is the last fragment.) 
unsigned numExtraHeaderBytes; if (fHNumber == 264) { fInputBuffer[fCurDataOffset-2] = fInputBuffer[0]; // FU indicator fInputBuffer[fCurDataOffset-1] = fInputBuffer[1]&~0x80; // FU header (no S bit) numExtraHeaderBytes = 2; } else { // 265 fInputBuffer[fCurDataOffset-3] = fInputBuffer[0]; // Payload header (1st byte) fInputBuffer[fCurDataOffset-2] = fInputBuffer[1]; // Payload header (2nd byte) fInputBuffer[fCurDataOffset-1] = fInputBuffer[2]&~0x80; // FU header (no S bit) numExtraHeaderBytes = 3; } unsigned numBytesToSend = numExtraHeaderBytes + (fNumValidDataBytes - fCurDataOffset); if (numBytesToSend > fMaxSize) { // We can't send all of the remaining data this time: numBytesToSend = fMaxSize; fLastFragmentCompletedNALUnit = False; } else { // This is the last fragment: fInputBuffer[fCurDataOffset-1] |= 0x40; // set the E bit in the FU header fNumTruncatedBytes = fSaveNumTruncatedBytes; } memmove(fTo, &fInputBuffer[fCurDataOffset-numExtraHeaderBytes], numBytesToSend); fFrameSize = numBytesToSend; fCurDataOffset += numBytesToSend - numExtraHeaderBytes; } if (fCurDataOffset >= fNumValidDataBytes) { // We're done with this data. Reset the pointers for receiving new data: fNumValidDataBytes = fCurDataOffset = 1; } // Complete delivery to the client: FramedSource::afterGetting(this); }
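For H.264, the fragmenter builds the FU indicator from the original NAL header's F/NRI bits plus payload type 28, and the FU header from the S/E flags plus the original nal_unit_type (the 0xE0/0x1F/0x80/0x40 masks above, per RFC 6184). A receiver reverses this to reassemble the NAL unit; the helper below only illustrates that bit layout and is not code from this library:

#include <cstdint>

// Given the first two bytes of an H.264 FU-A packet, recover the original
// one-byte NAL header and the fragment's start/end flags (illustrative only).
struct FuAInfo {
  uint8_t nalHeader; // F/NRI from the FU indicator + type from the FU header
  bool    isStart;   // S bit
  bool    isEnd;     // E bit
};

static FuAInfo parseFuA(uint8_t fuIndicator, uint8_t fuHeader) {
  FuAInfo info;
  info.nalHeader = (fuIndicator & 0xE0) | (fuHeader & 0x1F);
  info.isStart   = (fuHeader & 0x80) != 0;
  info.isEnd     = (fuHeader & 0x40) != 0;
  return info;
}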
void RTCPInstance::reschedule(double nextTime) {
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  schedule(nextTime);
}
void RTCPInstance::incomingReportHandler1() { do { Boolean callByeHandler = False; int tcpReadStreamSocketNum = fRTCPInterface.nextTCPReadStreamSocketNum(); unsigned char tcpReadStreamChannelId = fRTCPInterface.nextTCPReadStreamChannelId(); unsigned packetSize = 0; unsigned numBytesRead; struct sockaddr_in fromAddress; Boolean packetReadWasIncomplete; Boolean readResult = fRTCPInterface.handleRead(&fInBuf[fNumBytesAlreadyRead], maxPacketSize - fNumBytesAlreadyRead, numBytesRead, fromAddress, packetReadWasIncomplete); if (packetReadWasIncomplete) { fNumBytesAlreadyRead += numBytesRead; return; // more reads are needed to get the entire packet } else { // normal case: We've read the entire packet packetSize = fNumBytesAlreadyRead + numBytesRead; fNumBytesAlreadyRead = 0; // for next time } if (!readResult) break; // Ignore the packet if it was looped-back from ourself: Boolean packetWasFromOurHost = False; if (RTCPgs()->wasLoopedBackFromUs(envir(), fromAddress)) { packetWasFromOurHost = True; // However, we still want to handle incoming RTCP packets from // *other processes* on the same machine. To distinguish this // case from a true loop-back, check whether we've just sent a // packet of the same size. (This check isn't perfect, but it seems // to be the best we can do.) if (fHaveJustSentPacket && fLastPacketSentSize == packetSize) { // This is a true loop-back: fHaveJustSentPacket = False; break; // ignore this packet } } unsigned char* pkt = fInBuf; if (fIsSSMSource && !packetWasFromOurHost) { // This packet is assumed to have been received via unicast (because we're a SSM source, and SSM receivers send back RTCP "RR" // packets via unicast). 'Reflect' the packet by resending it to the multicast group, so that any other receivers can also // get to see it. // NOTE: Denial-of-service attacks are possible here. // Users of this software may wish to add their own, // application-specific mechanism for 'authenticating' the // validity of this packet before reflecting it. // NOTE: The test for "!packetWasFromOurHost" means that we won't reflect RTCP packets that come from other processes on // the same host as us. The reason for this is that the 'packet size' test above is not 100% reliable; some packets // that were truly looped back from us might not be detected as such, and this might lead to infinite forwarding/receiving // of some packets. To avoid this possibility, we only reflect RTCP packets that we know for sure originated elsewhere. // (Note, though, that if we ever re-enable the code in "Groupsock::multicastSendOnly()", then we could remove the test for // "!packetWasFromOurHost".) 
fRTCPInterface.sendPacket(pkt, packetSize); fHaveJustSentPacket = True; fLastPacketSentSize = packetSize; } #ifdef DEBUG fprintf(stderr, "[%p]saw incoming RTCP packet (from address %s, port %d)\n", this, AddressString(fromAddress).val(), ntohs(fromAddress.sin_port)); for (unsigned i = 0; i < packetSize; ++i) { if (i%4 == 0) fprintf(stderr, " "); fprintf(stderr, "%02x", pkt[i]); } fprintf(stderr, "\n"); #endif int totPacketSize = IP_UDP_HDR_SIZE + packetSize; // Check the RTCP packet for validity: // It must at least contain a header (4 bytes), and this header // must be version=2, with no padding bit, and a payload type of // SR (200) or RR (201): if (packetSize < 4) break; unsigned rtcpHdr = ntohl(*(u_int32_t*)pkt); if ((rtcpHdr & 0xE0FE0000) != (0x80000000 | (RTCP_PT_SR<<16))) { #ifdef DEBUG fprintf(stderr, "rejected bad RTCP packet: header 0x%08x\n", rtcpHdr); #endif break; } // Process each of the individual RTCP 'subpackets' in (what may be) // a compound RTCP packet. int typeOfPacket = PACKET_UNKNOWN_TYPE; unsigned reportSenderSSRC = 0; Boolean packetOK = False; while (1) { unsigned rc = (rtcpHdr>>24)&0x1F; unsigned pt = (rtcpHdr>>16)&0xFF; unsigned length = 4*(rtcpHdr&0xFFFF); // doesn't count hdr ADVANCE(4); // skip over the header if (length > packetSize) break; // Assume that each RTCP subpacket begins with a 4-byte SSRC: if (length < 4) break; length -= 4; reportSenderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4); Boolean subPacketOK = False; switch (pt) { case RTCP_PT_SR: { #ifdef DEBUG fprintf(stderr, "SR\n"); #endif if (length < 20) break; length -= 20; // Extract the NTP timestamp, and note this: unsigned NTPmsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned NTPlsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned rtpTimestamp = ntohl(*(u_int32_t*)pkt); ADVANCE(4); if (fSource != NULL) { RTPReceptionStatsDB& receptionStats = fSource->receptionStatsDB(); receptionStats.noteIncomingSR(reportSenderSSRC, NTPmsw, NTPlsw, rtpTimestamp); } ADVANCE(8); // skip over packet count, octet count // If a 'SR handler' was set, call it now: if (fSRHandlerTask != NULL) (*fSRHandlerTask)(fSRHandlerClientData); // The rest of the SR is handled like a RR (so, no "break;" here) } case RTCP_PT_RR: { #ifdef DEBUG fprintf(stderr, "RR\n"); #endif unsigned reportBlocksSize = rc*(6*4); if (length < reportBlocksSize) break; length -= reportBlocksSize; if (fSink != NULL) { // Use this information to update stats about our transmissions: RTPTransmissionStatsDB& transmissionStats = fSink->transmissionStatsDB(); for (unsigned i = 0; i < rc; ++i) { unsigned senderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4); // We care only about reports about our own transmission, not others' if (senderSSRC == fSink->SSRC()) { unsigned lossStats = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned highestReceived = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned jitter = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned timeLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned timeSinceLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4); transmissionStats.noteIncomingRR(reportSenderSSRC, fromAddress, lossStats, highestReceived, jitter, timeLastSR, timeSinceLastSR); } else { ADVANCE(4*5); } } } else { ADVANCE(reportBlocksSize); } if (pt == RTCP_PT_RR) { // i.e., we didn't fall through from 'SR' // If a 'RR handler' was set, call it now: // Specific RR handler: if (fSpecificRRHandlerTable != NULL) { netAddressBits fromAddr; portNumBits fromPortNum; if (tcpReadStreamSocketNum < 0) { // Normal case: We read the RTCP packet over UDP fromAddr = 
fromAddress.sin_addr.s_addr; fromPortNum = ntohs(fromAddress.sin_port); } else { // Special case: We read the RTCP packet over TCP (interleaved) // Hack: Use the TCP socket and channel id to look up the handler fromAddr = tcpReadStreamSocketNum; fromPortNum = tcpReadStreamChannelId; } Port fromPort(fromPortNum); RRHandlerRecord* rrHandler = (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddr, (~0), fromPort)); if (rrHandler != NULL) { if (rrHandler->rrHandlerTask != NULL) { (*(rrHandler->rrHandlerTask))(rrHandler->rrHandlerClientData); } } } // General RR handler: if (fRRHandlerTask != NULL) (*fRRHandlerTask)(fRRHandlerClientData); } subPacketOK = True; typeOfPacket = PACKET_RTCP_REPORT; break; } case RTCP_PT_BYE: { #ifdef DEBUG fprintf(stderr, "BYE\n"); #endif // If a 'BYE handler' was set, arrange for it to be called at the end of this routine. // (Note: We don't call it immediately, in case it happens to cause "this" to be deleted.) if (fByeHandlerTask != NULL && (!fByeHandleActiveParticipantsOnly || (fSource != NULL && fSource->receptionStatsDB().lookup(reportSenderSSRC) != NULL) || (fSink != NULL && fSink->transmissionStatsDB().lookup(reportSenderSSRC) != NULL))) { callByeHandler = True; } // We should really check for & handle >1 SSRCs being present ##### subPacketOK = True; typeOfPacket = PACKET_BYE; break; } // Later handle SDES, APP, and compound RTCP packets ##### default: #ifdef DEBUG fprintf(stderr, "UNSUPPORTED TYPE(0x%x)\n", pt); #endif subPacketOK = True; break; } if (!subPacketOK) break; // need to check for (& handle) SSRC collision! ##### #ifdef DEBUG fprintf(stderr, "validated RTCP subpacket (type %d): %d, %d, %d, 0x%08x\n", typeOfPacket, rc, pt, length, reportSenderSSRC); #endif // Skip over any remaining bytes in this subpacket: ADVANCE(length); // Check whether another RTCP 'subpacket' follows: if (packetSize == 0) { packetOK = True; break; } else if (packetSize < 4) { #ifdef DEBUG fprintf(stderr, "extraneous %d bytes at end of RTCP packet!\n", packetSize); #endif break; } rtcpHdr = ntohl(*(u_int32_t*)pkt); if ((rtcpHdr & 0xC0000000) != 0x80000000) { #ifdef DEBUG fprintf(stderr, "bad RTCP subpacket: header 0x%08x\n", rtcpHdr); #endif break; } } if (!packetOK) { #ifdef DEBUG fprintf(stderr, "rejected bad RTCP subpacket: header 0x%08x\n", rtcpHdr); #endif break; } else { #ifdef DEBUG fprintf(stderr, "validated entire RTCP packet\n"); #endif } onReceive(typeOfPacket, totPacketSize, reportSenderSSRC); // Finally, if we need to call a "BYE" handler, do so now (in case it causes "this" to get deleted): if (callByeHandler && fByeHandlerTask != NULL/*sanity check*/) { TaskFunc* byeHandler = fByeHandlerTask; fByeHandlerTask = NULL; // because we call the handler only once, by default (*byeHandler)(fByeHandlerClientData); } } while (0); }
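The comparison against the 0xE0FE0000 mask packs three requirements into one check: the RTCP version must be 2, the padding bit must be clear, and the payload type must be SR (200) or RR (201); the low bit of the PT field is masked out so that both values pass. The snippet below is a purely illustrative field-by-field restatement of the same test, not library code:

#include <cstdint>

// Field-by-field view of the first 32 bits of an RTCP packet, equivalent to
// the single masked comparison used above.
static bool looksLikeRtcpReportHeader(uint32_t rtcpHdr) {
  unsigned version = (rtcpHdr >> 30) & 0x3;  // must be 2
  unsigned padding = (rtcpHdr >> 29) & 0x1;  // must be 0
  unsigned pt      = (rtcpHdr >> 16) & 0xFF; // must be 200 (SR) or 201 (RR)
  return version == 2 && padding == 0 && (pt == 200 || pt == 201);
}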
FramedSource* H264QueueServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  // TODO: use a measured bitrate instead of this hard-coded estimate
  estBitrate = 2000; // kbps, estimate
  return H264VideoStreamDiscreteFramer::createNew(envir(), replicator->createStreamReplica());
}
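The replicator member used above is a live555 StreamReplicator, which lets every connected client pull its own replica of a single upstream source. The sketch below shows how such a replicator is typically created; the liveSource argument stands in for whatever produces the queue's H.264 frames and is not defined in this code base:

// Sketch: share one live H.264 input among many RTSP clients.
StreamReplicator* makeReplicator(UsageEnvironment& env, FramedSource* liveSource) {
  // 'False' keeps the original source alive even after the last replica is destroyed:
  return StreamReplicator::createNew(env, liveSource, False);
}
// Each H264QueueServerMediaSubsession then calls replicator->createStreamReplica()
// inside createNewStreamSource(), as shown above.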
FramedSource* ProxyServerMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession; if (verbosityLevel() > 0) { envir() << *this << "::createNewStreamSource(session id " << clientSessionId << ")\n"; } // If we haven't yet created a data source from our 'media subsession' object, initiate() it to do so: if (fClientMediaSubsession.readSource() == NULL) { fClientMediaSubsession.receiveRawMP3ADUs(); // hack for MPA-ROBUST streams fClientMediaSubsession.receiveRawJPEGFrames(); // hack for proxying JPEG/RTP streams. (Don't do this if we're transcoding.) fClientMediaSubsession.initiate(); if (verbosityLevel() > 0) { envir() << "\tInitiated: " << *this << "\n"; } if (fClientMediaSubsession.readSource() != NULL) { // First, check whether we have defined a 'transcoder' filter to be used with this codec: if (sms->fTranscodingTable != NULL) { char* outputCodecName; FramedFilter* transcoder = sms->fTranscodingTable->lookupTranscoder(fClientMediaSubsession, outputCodecName); if (transcoder != NULL) { fClientMediaSubsession.addFilter(transcoder); delete[] (char*)fCodecName; fCodecName = outputCodecName; } } // Then, add to the front of all data sources a filter that will 'normalize' their frames' // presentation times, before the frames get re-transmitted by our server: FramedFilter* normalizerFilter = sms->fPresentationTimeSessionNormalizer ->createNewPresentationTimeSubsessionNormalizer(fClientMediaSubsession.readSource(), fClientMediaSubsession.rtpSource(), fCodecName); fClientMediaSubsession.addFilter(normalizerFilter); // Some data sources require a 'framer' object to be added, before they can be fed into // a "RTPSink". Adjust for this now: if (strcmp(fCodecName, "H264") == 0) { fClientMediaSubsession.addFilter(H264VideoStreamDiscreteFramer ::createNew(envir(), fClientMediaSubsession.readSource())); } else if (strcmp(fCodecName, "H265") == 0) { fClientMediaSubsession.addFilter(H265VideoStreamDiscreteFramer ::createNew(envir(), fClientMediaSubsession.readSource())); } else if (strcmp(fCodecName, "MP4V-ES") == 0) { fClientMediaSubsession.addFilter(MPEG4VideoStreamDiscreteFramer ::createNew(envir(), fClientMediaSubsession.readSource(), True/* leave PTs unmodified*/)); } else if (strcmp(fCodecName, "MPV") == 0) { fClientMediaSubsession.addFilter(MPEG1or2VideoStreamDiscreteFramer ::createNew(envir(), fClientMediaSubsession.readSource(), False, 5.0, True/* leave PTs unmodified*/)); } else if (strcmp(fCodecName, "DV") == 0) { fClientMediaSubsession.addFilter(DVVideoStreamFramer ::createNew(envir(), fClientMediaSubsession.readSource(), False, True/* leave PTs unmodified*/)); } } if (fClientMediaSubsession.rtcpInstance() != NULL) { fClientMediaSubsession.rtcpInstance()->setByeHandler(subsessionByeHandler, this); } } ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient; if (clientSessionId != 0) { // We're being called as a result of implementing a RTSP "SETUP". if (!fHaveSetupStream) { // This is our first "SETUP". Send RTSP "SETUP" and later "PLAY" commands to the proxied server, to start streaming: // (Before sending "SETUP", enqueue ourselves on the "RTSPClient"s 'SETUP queue', so we'll be able to get the correct // "ProxyServerMediaSubsession" to handle the response. 
(Note that responses come back in the same order as requests.)) Boolean queueWasEmpty = proxyRTSPClient->fSetupQueueHead == NULL; if (queueWasEmpty) { if (proxyRTSPClient->fSetupQueueTail != NULL) fprintf(stderr, "##### INTERNAL ERROR 3\n"); proxyRTSPClient->fSetupQueueHead = this; } else { if (proxyRTSPClient->fSetupQueueTail == NULL) fprintf(stderr, "##### INTERNAL ERROR 4\n"); else //##### TEMP FOR DEBUGGING proxyRTSPClient->fSetupQueueTail->fNext = this; } proxyRTSPClient->fSetupQueueTail = this; // Hack: If there's already a pending "SETUP" request (for another track), don't send this track's "SETUP" right away, because // the server might not properly handle 'pipelined' requests. Instead, wait until after previous "SETUP" responses come back. if (queueWasEmpty) { proxyRTSPClient->sendSetupCommand(fClientMediaSubsession, ::continueAfterSETUP, False, proxyRTSPClient->fStreamRTPOverTCP, False, proxyRTSPClient->auth()); ++proxyRTSPClient->fNumSetupsDone; fHaveSetupStream = True; } } else { // This is a "SETUP" from a new client. We know that there are no other currently active clients (otherwise we wouldn't // have been called here), so we know that the substream was previously "PAUSE"d. Send "PLAY" downstream once again, // to resume the stream: if (!proxyRTSPClient->fLastCommandWasPLAY) { // so that we send only one "PLAY"; not one for each subsession proxyRTSPClient->sendPlayCommand(fClientMediaSubsession.parentSession(), ::continueAfterPLAY, -1.0f/*resume from previous point*/, -1.0f, 1.0f, proxyRTSPClient->auth()); proxyRTSPClient->fLastCommandWasPLAY = True; } } } estBitrate = fClientMediaSubsession.bandwidth(); if (estBitrate == 0) estBitrate = 50; // kbps, estimate return fClientMediaSubsession.readSource(); }
char* SIPClient::invite1(Authenticator* authenticator) { do { // Send the INVITE command: // First, construct an authenticator string: fValidAuthenticator.reset(); fWorkingAuthenticator = authenticator; char* authenticatorStr = createAuthenticatorString(fWorkingAuthenticator, "INVITE", fURL); // Then, construct the SDP description to be sent in the INVITE: char* rtpmapLine; unsigned rtpmapLineSize; if (fMIMESubtypeSize > 0) { char const* const rtpmapFmt = "a=rtpmap:%u %s/8000\r\n"; unsigned rtpmapFmtSize = strlen(rtpmapFmt) + 3 /* max char len */ + fMIMESubtypeSize; rtpmapLine = new char[rtpmapFmtSize]; sprintf(rtpmapLine, rtpmapFmt, fDesiredAudioRTPPayloadFormat, fMIMESubtype); rtpmapLineSize = strlen(rtpmapLine); } else { // Static payload type => no "a=rtpmap:" line rtpmapLine = strDup(""); rtpmapLineSize = 0; } char const* const inviteSDPFmt = "v=0\r\n" "o=- %u %u IN IP4 %s\r\n" "s=%s session\r\n" "c=IN IP4 %s\r\n" "t=0 0\r\n" "m=audio %u RTP/AVP %u\r\n" "%s"; unsigned inviteSDPFmtSize = strlen(inviteSDPFmt) + 20 /* max int len */ + 20 + fOurAddressStrSize + fApplicationNameSize + fOurAddressStrSize + 5 /* max short len */ + 3 /* max char len */ + rtpmapLineSize; delete[] fInviteSDPDescription; fInviteSDPDescription = new char[inviteSDPFmtSize]; sprintf(fInviteSDPDescription, inviteSDPFmt, fCallId, fCSeq, fOurAddressStr, fApplicationName, fOurAddressStr, fClientStartPortNum, fDesiredAudioRTPPayloadFormat, rtpmapLine); unsigned inviteSDPSize = strlen(fInviteSDPDescription); delete[] rtpmapLine; char const* const cmdFmt = "INVITE %s SIP/2.0\r\n" "From: %s <sip:%s@%s>;tag=%u\r\n" "Via: SIP/2.0/UDP %s:%u\r\n" "To: %s\r\n" "Contact: sip:%s@%s:%u\r\n" "Call-ID: %u@%s\r\n" "CSeq: %d INVITE\r\n" "Content-Type: application/sdp\r\n" "%s" /* Proxy-Authorization: line (if any) */ "%s" /* User-Agent: line */ "Content-length: %d\r\n\r\n" "%s"; unsigned inviteCmdSize = strlen(cmdFmt) + fURLSize + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */ + fOurAddressStrSize + 5 /* max port len */ + fURLSize + fUserNameSize + fOurAddressStrSize + 5 + 20 + fOurAddressStrSize + 20 + strlen(authenticatorStr) + fUserAgentHeaderStrSize + 20 + inviteSDPSize; delete[] fInviteCmd; fInviteCmd = new char[inviteCmdSize]; sprintf(fInviteCmd, cmdFmt, fURL, fUserName, fUserName, fOurAddressStr, fFromTag, fOurAddressStr, fOurPortNum, fURL, fUserName, fOurAddressStr, fOurPortNum, fCallId, fOurAddressStr, ++fCSeq, authenticatorStr, fUserAgentHeaderStr, inviteSDPSize, fInviteSDPDescription); fInviteCmdSize = strlen(fInviteCmd); delete[] authenticatorStr; // Before sending the "INVITE", arrange to handle any response packets, // and set up timers: fInviteClientState = Calling; fEventLoopStopFlag = 0; TaskScheduler& sched = envir().taskScheduler(); // abbrev. sched.turnOnBackgroundReadHandling(fOurSocket->socketNum(), &inviteResponseHandler, this); fTimerALen = 1*fT1; // initially fTimerACount = 0; // initially fTimerA = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this); fTimerB = sched.scheduleDelayedTask(64*fT1, timerBHandler, this); fTimerD = NULL; // for now if (!sendINVITE()) break; // Enter the event loop, to handle response packets, and timeouts: envir().taskScheduler().doEventLoop(&fEventLoopStopFlag); // We're finished with this "INVITE". 
// Turn off response handling and timers: sched.turnOffBackgroundReadHandling(fOurSocket->socketNum()); sched.unscheduleDelayedTask(fTimerA); sched.unscheduleDelayedTask(fTimerB); sched.unscheduleDelayedTask(fTimerD); // NOTE: We return the SDP description that we used in the "INVITE", // not the one that we got from the server. // ##### Later: match the codecs in the response (offer, answer) ##### if (fInviteSDPDescription != NULL) { return strDup(fInviteSDPDescription); } } while (0); fInviteStatusCode = 2; return NULL; }
RTPSink* ProxyServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { if (verbosityLevel() > 0) { envir() << *this << "::createNewRTPSink()\n"; } // Create (and return) the appropriate "RTPSink" object for our codec: // (Note: The configuration string might not be correct if a transcoder is used. FIX!) ##### RTPSink* newSink; if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) { newSink = AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency()); #if 0 // This code does not work; do *not* enable it: } else if (strcmp(fCodecName, "AMR") == 0 || strcmp(fCodecName, "AMR-WB") == 0) { Boolean isWideband = strcmp(fCodecName, "AMR-WB") == 0; newSink = AMRAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, isWideband, fClientMediaSubsession.numChannels()); #endif } else if (strcmp(fCodecName, "DV") == 0) { newSink = DVVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(fCodecName, "GSM") == 0) { newSink = GSMAudioRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(fCodecName, "H263-1998") == 0 || strcmp(fCodecName, "H263-2000") == 0) { newSink = H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency()); } else if (strcmp(fCodecName, "H264") == 0) { newSink = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.fmtp_spropparametersets()); } else if (strcmp(fCodecName, "H265") == 0) { newSink = H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.fmtp_spropvps(), fClientMediaSubsession.fmtp_spropsps(), fClientMediaSubsession.fmtp_sproppps()); } else if (strcmp(fCodecName, "JPEG") == 0) { newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, 26, 90000, "video", "JPEG", 1/*numChannels*/, False/*allowMultipleFramesPerPacket*/, False/*doNormalMBitRule*/); } else if (strcmp(fCodecName, "MP4A-LATM") == 0) { newSink = MPEG4LATMAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.fmtp_config(), fClientMediaSubsession.numChannels()); } else if (strcmp(fCodecName, "MP4V-ES") == 0) { newSink = MPEG4ESVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.attrVal_unsigned("profile-level-id"), fClientMediaSubsession.fmtp_config()); } else if (strcmp(fCodecName, "MPA") == 0) { newSink = MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) { newSink = MP3ADURTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) { newSink = MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.mediumName(), fClientMediaSubsession.attrVal_str("mode"), fClientMediaSubsession.fmtp_config(), fClientMediaSubsession.numChannels()); } else if (strcmp(fCodecName, "MPV") == 0) { newSink = MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(fCodecName, "OPUS") == 0) { newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, 48000, "audio", "OPUS", 2, False/*only 1 Opus 'packet' in each RTP packet*/); } else if 
(strcmp(fCodecName, "T140") == 0) { newSink = T140TextRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(fCodecName, "THEORA") == 0) { newSink = TheoraVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.fmtp_config()); } else if (strcmp(fCodecName, "VORBIS") == 0) { newSink = VorbisAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.numChannels(), fClientMediaSubsession.fmtp_config()); } else if (strcmp(fCodecName, "VP8") == 0) { newSink = VP8VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(fCodecName, "VP9") == 0) { newSink = VP9VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(fCodecName, "AMR") == 0 || strcmp(fCodecName, "AMR-WB") == 0) { // Proxying of these codecs is currently *not* supported, because the data received by the "RTPSource" object is not in a // form that can be fed directly into a corresponding "RTPSink" object. if (verbosityLevel() > 0) { envir() << "\treturns NULL (because we currently don't support the proxying of \"" << fClientMediaSubsession.mediumName() << "/" << fCodecName << "\" streams)\n"; } return NULL; } else if (strcmp(fCodecName, "QCELP") == 0 || strcmp(fCodecName, "H261") == 0 || strcmp(fCodecName, "H263-1998") == 0 || strcmp(fCodecName, "H263-2000") == 0 || strcmp(fCodecName, "X-QT") == 0 || strcmp(fCodecName, "X-QUICKTIME") == 0) { // This codec requires a specialized RTP payload format; however, we don't yet have an appropriate "RTPSink" subclass for it: if (verbosityLevel() > 0) { envir() << "\treturns NULL (because we don't have a \"RTPSink\" subclass for this RTP payload format)\n"; } return NULL; } else { // This codec is assumed to have a simple RTP payload format that can be implemented just with a "SimpleRTPSink": Boolean allowMultipleFramesPerPacket = True; // by default Boolean doNormalMBitRule = True; // by default // Some codecs change the above default parameters: if (strcmp(fCodecName, "MP2T") == 0) { doNormalMBitRule = False; // no RTP 'M' bit } newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.mediumName(), fCodecName, fClientMediaSubsession.numChannels(), allowMultipleFramesPerPacket, doNormalMBitRule); } // Because our relayed frames' presentation times are inaccurate until the input frames have been RTCP-synchronized, // we temporarily disable RTCP "SR" reports for this "RTPSink" object: newSink->enableRTCPReports() = False; // Also tell our "PresentationTimeSubsessionNormalizer" object about the "RTPSink", so it can enable RTCP "SR" reports later: PresentationTimeSubsessionNormalizer* ssNormalizer; if (strcmp(fCodecName, "H264") == 0 || strcmp(fCodecName, "H265") == 0 || strcmp(fCodecName, "MP4V-ES") == 0 || strcmp(fCodecName, "MPV") == 0 || strcmp(fCodecName, "DV") == 0) { // There was a separate 'framer' object in front of the "PresentationTimeSubsessionNormalizer", so go back one object to get it: ssNormalizer = (PresentationTimeSubsessionNormalizer*)(((FramedFilter*)inputSource)->inputSource()); } else { ssNormalizer = (PresentationTimeSubsessionNormalizer*)inputSource; } ssNormalizer->setRTPSink(newSink); return newSink; }
void SIPClient::doInviteStateMachine(unsigned responseCode) { // Implement the state transition diagram (RFC 3261, Figure 5) TaskScheduler& sched = envir().taskScheduler(); // abbrev. switch (fInviteClientState) { case Calling: { if (responseCode == timerAFires) { // Restart timer A (with double the timeout interval): fTimerALen *= 2; fTimerA = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this); fInviteClientState = Calling; if (!sendINVITE()) doInviteStateTerminated(0); } else { // Turn off timers A & B before moving to a new state: sched.unscheduleDelayedTask(fTimerA); sched.unscheduleDelayedTask(fTimerB); if (responseCode == timerBFires) { envir().setResultMsg("No response from server"); doInviteStateTerminated(0); } else if (responseCode >= 100 && responseCode <= 199) { fInviteClientState = Proceeding; } else if (responseCode >= 200 && responseCode <= 299) { doInviteStateTerminated(responseCode); } else if (responseCode >= 400 && responseCode <= 499) { doInviteStateTerminated(responseCode); // this isn't what the spec says, but it seems right... } else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; fTimerD = sched.scheduleDelayedTask(32000000, timerDHandler, this); if (!sendACK()) doInviteStateTerminated(0); } } break; } case Proceeding: { if (responseCode >= 100 && responseCode <= 199) { fInviteClientState = Proceeding; } else if (responseCode >= 200 && responseCode <= 299) { doInviteStateTerminated(responseCode); } else if (responseCode >= 400 && responseCode <= 499) { doInviteStateTerminated(responseCode); // this isn't what the spec says, but it seems right... } else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; fTimerD = sched.scheduleDelayedTask(32000000, timerDHandler, this); if (!sendACK()) doInviteStateTerminated(0); } break; } case Completed: { if (responseCode == timerDFires) { envir().setResultMsg("Transaction terminated"); doInviteStateTerminated(0); } else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; if (!sendACK()) doInviteStateTerminated(0); } break; } case Terminated: { doInviteStateTerminated(responseCode); break; } } }
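Timer A implements the INVITE retransmission schedule of RFC 3261 §17.1.1.2: the first retransmission fires after T1 (fT1 is 500 ms here), the interval doubles on every subsequent firing, and timer B bounds the whole transaction at 64*T1. The snippet below is plain arithmetic illustrating the resulting schedule, not library code:

#include <cstdio>

// Print the INVITE retransmission offsets produced by doubling timer A,
// stopping where timer B (64*T1) would fire. T1 matches fT1 above (500 ms, in microseconds).
int main() {
  unsigned const T1 = 500000;   // us
  unsigned timerALen = T1;      // first interval
  unsigned elapsed = 0;
  while (elapsed + timerALen < 64 * T1) {
    elapsed += timerALen;
    std::printf("retransmit INVITE at %u ms\n", elapsed / 1000);
    timerALen *= 2;             // same doubling as fTimerALen *= 2
  }
  return 0;
}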
FramedSource* H264VideoFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // 500 kbps, estimate int streamId = -1; if (strncmp(fFileName, "live_stream1", 12) == 0) { streamId = 0; } else if (strncmp(fFileName, "live_stream2", 12) == 0) { streamId = 1; } else if (strncmp(fFileName, "live_stream3", 12) == 0) { streamId = 2; } else if (strncmp(fFileName, "live_stream4", 12) == 0) { streamId = 3; } else { #if defined( USE_V3_3_CODE ) // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: if (fEncType == IAV_ENCODE_H264) { return MyH264VideoStreamFramer::createNew(envir(), fileSource); } else if (fEncType == IAV_ENCODE_MJPEG) { return NULL; //not realized } else { return NULL; } #else // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); return MyH264VideoStreamFramer::createNew(envir(), fileSource); #endif } #if defined( USE_V3_3_CODE ) if (fEncType == IAV_ENCODE_H264) { return MyH264VideoStreamFramer::createNew(envir(), streamId); } else if (fEncType == IAV_ENCODE_MJPEG) { int jpegQuality = getJpegQ(streamId); if ( jpegQuality < 0) { return NULL; } return MyJPEGVideoSource::createNew(envir(), streamId, jpegQuality); } else { return NULL; } #else return MyH264VideoStreamFramer::createNew( envir(), streamId, fIPCMediaDataDispatchServerPort, fIPCMediaDataDispatchClientPort ); #endif }
SIPClient::SIPClient(UsageEnvironment& env, unsigned char desiredAudioRTPPayloadFormat, char const* mimeSubtype, int verbosityLevel, char const* applicationName) : Medium(env), fT1(500000 /* 500 ms */), fDesiredAudioRTPPayloadFormat(desiredAudioRTPPayloadFormat), fVerbosityLevel(verbosityLevel), fCSeq(0), fURL(NULL), fURLSize(0), fToTagStr(NULL), fToTagStrSize(0), fUserName(NULL), fUserNameSize(0), fInviteSDPDescription(NULL), fInviteCmd(NULL), fInviteCmdSize(0){ if (mimeSubtype == NULL) mimeSubtype = ""; fMIMESubtype = strDup(mimeSubtype); fMIMESubtypeSize = strlen(fMIMESubtype); if (applicationName == NULL) applicationName = ""; fApplicationName = strDup(applicationName); fApplicationNameSize = strlen(fApplicationName); struct in_addr ourAddress; ourAddress.s_addr = ourIPAddress(env); // hack fOurAddressStr = strDup(our_inet_ntoa(ourAddress)); fOurAddressStrSize = strlen(fOurAddressStr); fOurSocket = new Groupsock(env, ourAddress, 0, 255); if (fOurSocket == NULL) { env << "ERROR: Failed to create socket for addr " << our_inet_ntoa(ourAddress) << ": " << env.getResultMsg() << "\n"; } // Now, find out our source port number. Hack: Do this by first trying to // send a 0-length packet, so that the "getSourcePort()" call will work. fOurSocket->output(envir(), 255, (unsigned char*)"", 0); Port srcPort(0); getSourcePort(env, fOurSocket->socketNum(), srcPort); if (srcPort.num() != 0) { fOurPortNum = ntohs(srcPort.num()); } else { // No luck. Try again using a default port number: fOurPortNum = 5060; delete fOurSocket; fOurSocket = new Groupsock(env, ourAddress, fOurPortNum, 255); if (fOurSocket == NULL) { env << "ERROR: Failed to create socket for addr " << our_inet_ntoa(ourAddress) << ", port " << fOurPortNum << ": " << env.getResultMsg() << "\n"; } } // Set various headers to be used in each request: char const* formatStr; unsigned headerSize; // Set the "User-Agent:" header: char const* const libName = "LIVE555 Streaming Media v"; char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING; char const* libPrefix; char const* libSuffix; if (applicationName == NULL || applicationName[0] == '\0') { applicationName = libPrefix = libSuffix = ""; } else { libPrefix = " ("; libSuffix = ")"; } formatStr = "User-Agent: %s%s%s%s%s\r\n"; headerSize = strlen(formatStr) + fApplicationNameSize + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix); fUserAgentHeaderStr = new char[headerSize]; sprintf(fUserAgentHeaderStr, formatStr, applicationName, libPrefix, libName, libVersionStr, libSuffix); fUserAgentHeaderStrSize = strlen(fUserAgentHeaderStr); reset(); }
RTPSink* H263plusVideoFileServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
  return H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}
unsigned SIPClient::getResponseCode() { unsigned responseCode = 0; do { // Get the response from the server: unsigned const readBufSize = 10000; char readBuffer[readBufSize+1]; char* readBuf = readBuffer; char* firstLine = NULL; char* nextLineStart = NULL; unsigned bytesRead = getResponse(readBuf, readBufSize); if (bytesRead == 0) break; if (fVerbosityLevel >= 1) { envir() << "Received INVITE response: " << readBuf << "\n"; } // Inspect the first line to get the response code: firstLine = readBuf; nextLineStart = getLine(firstLine); if (!parseResponseCode(firstLine, responseCode)) break; if (responseCode != 200) { if (responseCode >= 400 && responseCode <= 499 && fWorkingAuthenticator != NULL) { // We have an authentication failure, so fill in // "*fWorkingAuthenticator" using the contents of a following // "Proxy-Authenticate:" line. (Once we compute a 'response' for // "fWorkingAuthenticator", it can be used in a subsequent request // - that will hopefully succeed.) char* lineStart; while (1) { lineStart = nextLineStart; if (lineStart == NULL) break; nextLineStart = getLine(lineStart); if (lineStart[0] == '\0') break; // this is a blank line char* realm = strDupSize(lineStart); char* nonce = strDupSize(lineStart); // ##### Check for the format of "Proxy-Authenticate:" lines from // ##### known server types. // ##### This is a crock! We should make the parsing more general Boolean foundAuthenticateHeader = False; if ( // Asterisk ##### sscanf(lineStart, "Proxy-Authenticate: Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2 || // Cisco ATA ##### sscanf(lineStart, "Proxy-Authenticate: Digest algorithm=MD5,domain=\"%*[^\"]\",nonce=\"%[^\"]\", realm=\"%[^\"]\"", nonce, realm) == 2) { fWorkingAuthenticator->setRealmAndNonce(realm, nonce); foundAuthenticateHeader = True; } delete[] realm; delete[] nonce; if (foundAuthenticateHeader) break; } } envir().setResultMsg("cannot handle INVITE response: ", firstLine); break; } // Skip every subsequent header line, until we see a blank line. // While doing so, check for "To:" and "Content-Length:" lines. // The remaining data is assumed to be the SDP descriptor that we want. // We should really do some more checking on the headers here - e.g., to // check for "Content-type: application/sdp", "CSeq", etc. ##### int contentLength = -1; char* lineStart; while (1) { lineStart = nextLineStart; if (lineStart == NULL) break; nextLineStart = getLine(lineStart); if (lineStart[0] == '\0') break; // this is a blank line char* toTagStr = strDupSize(lineStart); if (sscanf(lineStart, "To:%*[^;]; tag=%s", toTagStr) == 1) { delete[] (char*)fToTagStr; fToTagStr = strDup(toTagStr); fToTagStrSize = strlen(fToTagStr); } delete[] toTagStr; if (sscanf(lineStart, "Content-Length: %d", &contentLength) == 1 || sscanf(lineStart, "Content-length: %d", &contentLength) == 1) { if (contentLength < 0) { envir().setResultMsg("Bad \"Content-length:\" header: \"", lineStart, "\""); break; } } } // We're now at the end of the response header lines if (lineStart == NULL) { envir().setResultMsg("no content following header lines: ", readBuf); break; } // Use the remaining data as the SDP descr, but first, check // the "Content-length:" header (if any) that we saw. We may need to // read more data, or we may have extraneous data in the buffer. 
char* bodyStart = nextLineStart; if (bodyStart != NULL && contentLength >= 0) { // We saw a "Content-length:" header unsigned numBodyBytes = &readBuf[bytesRead] - bodyStart; if (contentLength > (int)numBodyBytes) { // We need to read more data. First, make sure we have enough // space for it: unsigned numExtraBytesNeeded = contentLength - numBodyBytes; #ifdef USING_TCP // THIS CODE WORKS ONLY FOR TCP: ##### unsigned remainingBufferSize = readBufSize - (bytesRead + (readBuf - readBuffer)); if (numExtraBytesNeeded > remainingBufferSize) { char tmpBuf[200]; sprintf(tmpBuf, "Read buffer size (%d) is too small for \"Content-length:\" %d (need a buffer size of >= %d bytes\n", readBufSize, contentLength, readBufSize + numExtraBytesNeeded - remainingBufferSize); envir().setResultMsg(tmpBuf); break; } // Keep reading more data until we have enough: if (fVerbosityLevel >= 1) { envir() << "Need to read " << numExtraBytesNeeded << " extra bytes\n"; } while (numExtraBytesNeeded > 0) { char* ptr = &readBuf[bytesRead]; unsigned bytesRead2; struct sockaddr_in fromAddr; Boolean readSuccess = fOurSocket->handleRead((unsigned char*)ptr, numExtraBytesNeeded, bytesRead2, fromAddr); if (!readSuccess) break; ptr[bytesRead2] = '\0'; if (fVerbosityLevel >= 1) { envir() << "Read " << bytesRead2 << " extra bytes: " << ptr << "\n"; } bytesRead += bytesRead2; numExtraBytesNeeded -= bytesRead2; } #endif if (numExtraBytesNeeded > 0) break; // one of the reads failed } bodyStart[contentLength] = '\0'; // trims any extra data } } while (0); return responseCode; }
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data. (Optionally) print out information about it:
  if (numTruncatedBytes > 0) {
    printf("============== warning, live555 truncated %d bytes =================\n", numTruncatedBytes);
  }
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // The resolution has changed, so stop delivering frames to the callback:
  if (_drop) {
    printf("############################### drop frame\n");
    return;
  }

  if (_cb) {
    RtspFrameInfo info;
    info.videoFPS = fSubsession.videoFPS();
    info.videoWidth = fSubsession.videoWidth();
    info.videoHeight = fSubsession.videoHeight();
    info.frequency = fSubsession.rtpTimestampFrequency();
    info.channels = fSubsession.numChannels();
    info.profile_level_id = fSubsession.fmtp_profile_level_id();
    strncpy((char*)&(info.mediaName), fSubsession.mediumName(), sizeof(info.mediaName));
    strncpy((char*)&(info.codecName), fSubsession.codecName(), sizeof(info.codecName));
    info.timestamp = presentationTime;
    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
      info.syncUseRTCP = false;
    } else {
      info.syncUseRTCP = true;
    }

    if (strcmp(fSubsession.mediumName(), "audio") == 0) {
      if (strcmp(fSubsession.codecName(), "MPEG4-GENERIC") == 0) {
        info.isHeader = 0;
        _cb(_channel, frameSize, (char*)fReceiveBuffer, info);
      } else if (strcmp(fSubsession.codecName(), "L16") == 0) {
        int i = fSubsession.numChannels();
        info.isHeader = 0;
        _cb(_channel, frameSize, (char*)fReceiveBuffer, info);
      }
    } else if (strcmp(fSubsession.mediumName(), "video") == 0) {
      if (strcmp(fSubsession.codecName(), "H264") == 0) {
        unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};
        if (!_sentHeader) {
          _sentHeader = true;
          unsigned numSpropRecords;
          if (fSubsession.fmtp_spropparametersets() && 0 < strlen(fSubsession.fmtp_spropparametersets())) {
            SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSpropRecords);
            printf("====================== proparamset: [%d]%s =================\n", numSpropRecords, fSubsession.fmtp_spropparametersets());
            if (numSpropRecords > 0) {
              int headerLen = 0;
              int validRecordNum = 0;
              for (unsigned int i = 0; i < numSpropRecords; i++) {
                printf("spropparameter first byte = %x\n", sPropRecords[i].sPropBytes[0]);
                if (((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8)) {
                  headerLen += sPropRecords[i].sPropLength;
                  validRecordNum += 1;
                }
              }
              headerLen += sizeof(start_code) * validRecordNum;
              char* headerData = new char[headerLen];
              int offset = 0;
              for (unsigned int i = 0; i < numSpropRecords; i++) {
                if (((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8)) {
                  memcpy(headerData + offset, start_code, 4);
                  offset += 4;
                  memcpy(headerData + offset, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
                  offset += sPropRecords[i].sPropLength;
                }
              }
              uint16_t w = 0;
              uint16_t h = 0;
              if (H264Parse::GetResolution((uint8_t*)headerData, headerLen, &w, &h)) {
                _w = w;
                _h = h;
              }
              info.isHeader = 1;
              _cb(_channel, headerLen, headerData, info);
              delete [] headerData;
            }
          }
        } else {
          if ((fReceiveBuffer[0] & 0x1f) == 7) {
            uint16_t w = 0;
            uint16_t h = 0;
            if (H264Parse::GetResolution((uint8_t*)fReceiveBuffer, frameSize, &w, &h)) {
              if (_w == 0 || _h == 0) {
                _w = w;
                _h = h;
              } else if ((_w != w) || (_h != h)) {
                printf("===== resolution changed: %dx%d -> %dx%d\n", _w, _h, w, h);
                _drop = true;
              }
            }
          }
        }
        if (!_drop) {
          info.isHeader = 0;
          char* newData = new char[sizeof(start_code) + frameSize];
          memcpy(newData, start_code, sizeof(start_code));
          memcpy(newData + sizeof(start_code), (char*)fReceiveBuffer, frameSize);
          _cb(_channel, frameSize + sizeof(start_code), newData, info);
          delete [] newData;
        }
      } else if (strcmp(fSubsession.codecName(), "MP4V-ES") == 0) {
#ifdef SEND_CONFIG_HEADER
        unsigned configLen;
        unsigned char* configData = parseGeneralConfigStr(fSubsession.fmtp_config(), configLen);
        info.isHeader = 1;
        _cb(_channel, configLen, (char*)configData, info);
#endif
        info.isHeader = 0;
        _cb(_channel, frameSize, (char*)fReceiveBuffer, info);
      } else {
        info.isHeader = 0;
        _cb(_channel, frameSize, (char*)fReceiveBuffer, info);
      }
    }
  }

  // Then continue, to request the next frame of data:
  continuePlaying();
}
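The values 7 and 8 compared against fReceiveBuffer[0] & 0x1f above are the H.264 nal_unit_type codes for SPS and PPS, which is why those NAL units are gathered into the one-off parameter-set header and probed for the video resolution. A short illustrative helper naming them (our own naming, not part of this code base):

#include <cstdint>

// The H.264 nal_unit_type values this sink cares about.
enum H264NalType : uint8_t {
  NAL_SPS = 7, // sequence parameter set (carries the resolution)
  NAL_PPS = 8  // picture parameter set
};

static inline uint8_t h264NalType(uint8_t firstPayloadByte) {
  return firstPayloadByte & 0x1F; // low 5 bits of the NAL header
}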
void DefaultSource::doStopGettingFrames() {
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
}