char* RTPSink::rtpmapLine() const { if (rtpPayloadType() >= 96) { // the payload format type is dynamic char* encodingParamsPart; if (numChannels() != 1) { encodingParamsPart = new char[1 + 20 /* max int len */]; sprintf(encodingParamsPart, "/%d", numChannels()); } else { encodingParamsPart = strDup(""); } char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n"; unsigned rtpmapFmtSize = strlen(rtpmapFmt) + 3 /* max char len */ + strlen(rtpPayloadFormatName()) + 20 /* max int len */ + strlen(encodingParamsPart); char* rtpmapLine = new char[rtpmapFmtSize]; sprintf(rtpmapLine, rtpmapFmt, rtpPayloadType(), rtpPayloadFormatName(), rtpTimestampFrequency(), encodingParamsPart); delete[] encodingParamsPart; return rtpmapLine; } else { // The payload format is staic, so there's no "a=rtpmap:" line: return strDup(""); } }
// Constructor: records the StreamMuxConfig string and framing mode, then
// pre-builds this stream's "a=fmtp:" SDP line.
MPEG4LATMAudioRTPSink
::MPEG4LATMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
			u_int8_t rtpPayloadFormat,
			u_int32_t rtpTimestampFrequency,
			char const* streamMuxConfigString,
			unsigned numChannels,
			Boolean allowMultipleFramesPerPacket)
  : AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
		 "MP4A-LATM", numChannels),
    fStreamMuxConfigString(strDup(streamMuxConfigString)),
    fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket) {
  // Build "a=fmtp:<pt> cpresent=0;config=<StreamMuxConfig>\r\n":
  char const* fmtpFmt = "a=fmtp:%d "
    "cpresent=0;config=%s\r\n";
  // The "%..." specifiers in the format string over-count the output, which
  // also covers the trailing NUL:
  unsigned const bufSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + strlen(fStreamMuxConfigString);
  char* buf = new char[bufSize];
  sprintf(buf, fmtpFmt, rtpPayloadType(), fStreamMuxConfigString);

  fFmtpSDPLine = strDup(buf);
  delete[] buf;
}
char const* AMRAudioRTPSink::auxSDPLine() { if (fFmtpSDPLine == NULL) { // Generate a "a=fmtp:" line with "octet-aligned=1" // (That is the only non-default parameter.) char buf[100]; sprintf(buf, "a=fmtp:%d octet-align=1\r\n", rtpPayloadType()); delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(buf); } return fFmtpSDPLine; }
char const* H264VideoRTPSink::auxSDPLine() { liveLogInfo(" H264VideoRTPSink::auxSDPLine \n"); // Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them), // otherwise parameters from our framer source (in case they've changed since the last time that // we were called): H264or5VideoStreamFramer* framerSource = NULL; u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0; u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize; u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize; if (sps == NULL || pps == NULL) { // We need to get SPS and PPS from our framer source: if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source) framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource()); if (framerSource == NULL) return NULL; // we don't yet have a source framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize); if (sps == NULL || pps == NULL) return NULL; // our source isn't ready } // Set up the "a=fmtp:" SDP line for this stream: u_int8_t* spsWEB = new u_int8_t[spsSize]; // "WEB" means "Without Emulation Bytes" unsigned spsWEBSize = removeH264or5EmulationBytes(spsWEB, spsSize, sps, spsSize); if (spsWEBSize < 4) { // Bad SPS size => assume our source isn't ready delete[] spsWEB; return NULL; } u_int32_t profileLevelId = (spsWEB[1]<<16) | (spsWEB[2]<<8) | spsWEB[3]; delete[] spsWEB; char* sps_base64 = base64Encode((char*)sps, spsSize); char* pps_base64 = base64Encode((char*)pps, ppsSize); char const* fmtpFmt = "a=fmtp:%d packetization-mode=1" ";profile-level-id=%06X" ";sprop-parameter-sets=%s,%s\r\n"; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 6 /* 3 bytes in hex */ + strlen(sps_base64) + strlen(pps_base64); char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), profileLevelId, sps_base64, pps_base64); delete[] sps_base64; delete[] pps_base64; delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp; return fFmtpSDPLine; }
// Builds this stream's "a=fmtp:" SDP line from the framer's DV profile name.
// Returns NULL while the framer cannot yet report a profile; otherwise the
// cached line (owned by this object).
char const* DVVideoRTPSink::auxSDPLineFromFramer(DVVideoStreamFramer* framerSource) {
  char const* const profileName = framerSource->profileName();
  if (profileName == NULL) return NULL; // framer not ready yet

  char const* const fmtpSDPFmt = "a=fmtp:%d encode=%s;audio=bundled\r\n";
  // The "%..." specifiers over-count the output, which also covers the NUL:
  unsigned const lineSize = strlen(fmtpSDPFmt)
    + 3 // max payload format code length
    + strlen(profileName);
  delete[] fFmtpSDPLine; // free any previously-generated line
  fFmtpSDPLine = new char[lineSize];
  sprintf(fFmtpSDPLine, fmtpSDPFmt, rtpPayloadType(), profileName);

  return fFmtpSDPLine;
}
// Generates a fresh "a=fmtp:" SDP attribute line for this H.264 stream on each
// call, using SPS/PPS obtained from the framer source. Returns NULL while the
// source (or its parameter sets) is not yet available; otherwise a line owned
// by this object.
char const *H264VideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using parameters from
  // our framer source (in case they've changed since the last time that
  // we were called):
  if (fOurFragmenter == NULL)
    return NULL; // we don't yet have a fragmenter (and therefore not a source)
  H264VideoStreamFramer *framerSource = (H264VideoStreamFramer *)(fOurFragmenter->inputSource());
  if (framerSource == NULL)
    return NULL; // we don't yet have a source

  // Out-parameters receive pointers into the framer's own copies:
  u_int8_t *sps;
  unsigned spsSize;
  u_int8_t *pps;
  unsigned ppsSize;
  framerSource->getSPSandPPS(sps, spsSize, pps, ppsSize);
  if (sps == NULL || pps == NULL)
    return NULL; // our source isn't ready

  u_int32_t profile_level_id;
  if (spsSize < 4) // sanity check
  {
    profile_level_id = 0;
  }
  else
  {
    // NOTE(review): these bytes come from the raw SPS, which may still contain
    // emulation-prevention bytes; other variants of this routine strip them
    // first -- confirm which behavior is intended.
    profile_level_id = (sps[1] << 16) | (sps[2] << 8) | sps[3]; // profile_idc|constraint_setN_flag|level_idc
  }

  // Set up the "a=fmtp:" SDP line for this stream:
  // (sprop-parameter-sets carries the Base64-encoded SPS and PPS)
  char *sps_base64 = base64Encode((char *)sps, spsSize);
  char *pps_base64 = base64Encode((char *)pps, ppsSize);
  char const *fmtpFmt = "a=fmtp:%d packetization-mode=1"
                        ";profile-level-id=%06X"
                        ";sprop-parameter-sets=%s,%s\r\n";
  // Buffer size: the format string's "%..." specifiers over-count the output,
  // which also covers the trailing NUL:
  unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 6 /* 3 bytes in hex */
    + strlen(sps_base64) + strlen(pps_base64);
  char *fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt, rtpPayloadType(), profile_level_id, sps_base64, pps_base64);
  delete[] sps_base64;
  delete[] pps_base64;

  delete[] fFmtpSDPLine; // free any previously-generated line
  fFmtpSDPLine = fmtp;
  return fFmtpSDPLine;
}
// Constructor: records the SDP media type, MPEG-4 mode, and config string,
// warns (via the environment) about unsupported modes, and pre-builds this
// stream's "a=fmtp:" SDP line.
// NOTE(review): if "mpeg4Mode"/"configString"/"sdpMediaTypeString" were NULL,
// the strlen()/strcmp() calls below would dereference NULL despite the logged
// check -- verify strDup(NULL) semantics and callers.
MPEG4GenericRTPSink
::MPEG4GenericRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
		      u_int8_t rtpPayloadFormat,
		      u_int32_t rtpTimestampFrequency,
		      char const* sdpMediaTypeString,
		      char const* mpeg4Mode, char const* configString,
		      unsigned numChannels)
  : MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
		       "MPEG4-GENERIC", numChannels),
    fSDPMediaTypeString(strDup(sdpMediaTypeString)),
    fMPEG4Mode(strDup(mpeg4Mode)), fConfigString(strDup(configString)) {
  // Check whether "mpeg4Mode" is one that we handle:
  if (mpeg4Mode == NULL) {
    env << "MPEG4GenericRTPSink error: NULL \"mpeg4Mode\" parameter\n";
  } else {
    // To ease comparison, convert "mpeg4Mode" to lower case:
    // ("len" includes the terminating NUL, so the copy loop below also
    // NUL-terminates "m")
    size_t const len = strlen(mpeg4Mode) + 1;
    char* m = new char[len];

    // The "Locale" object scopes a switch to the POSIX locale, making
    // tolower()'s result independent of the process's current locale:
    Locale l("POSIX");
    for (size_t i = 0; i < len; ++i) m[i] = tolower(mpeg4Mode[i]);
    if (strcmp(m, "aac-hbr") != 0) {
      // Unsupported mode: warn, but continue constructing anyway.
      env << "MPEG4GenericRTPSink error: Unknown \"mpeg4Mode\" parameter: \"" << mpeg4Mode << "\"\n";
    }

    delete[] m;
  }

  // Set up the "a=fmtp:" SDP line for this stream:
  char const* fmtpFmt =
    "a=fmtp:%d "
    "streamtype=%d;profile-level-id=1;"
    "mode=%s;sizelength=13;indexlength=3;indexdeltalength=3;"
    "config=%s\r\n";
  // Buffer size: the "%..." specifiers over-count the output, which also
  // covers the trailing NUL:
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + 3 /* max char len */
    + strlen(fMPEG4Mode)
    + strlen(fConfigString);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
	  rtpPayloadType(),
	  // streamtype: 4 when the SDP media type is "video", otherwise 5
	  strcmp(fSDPMediaTypeString, "video") == 0 ? 4 : 5,
	  fMPEG4Mode, fConfigString);
  fFmtpSDPLine = strDup(fmtp);
  delete[] fmtp;
}
char const* MPEG4ESVideoRTPSink::auxSDPLine() { // Generate a new "a=fmtp:" line each time, using our own 'configuration' information (if we have it), // otherwise parameters from our framer source (in case they've changed since the last time that // we were called): unsigned configLength = fNumConfigBytes; unsigned char* config = fConfigBytes; if (fProfileAndLevelIndication == 0 || config == NULL) { // We need to get this information from our framer source: MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource; if (framerSource == NULL) return NULL; // we don't yet have a source fProfileAndLevelIndication = framerSource->profile_and_level_indication(); if (fProfileAndLevelIndication == 0) return NULL; // our source isn't ready config = framerSource->getConfigBytes(configLength); if (config == NULL) return NULL; // our source isn't ready } char const* fmtpFmt = "a=fmtp:%d " "profile-level-id=%d;" "config="; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 3 /* max char len */ + 2*configLength /* 2*, because each byte prints as 2 chars */ + 2 /* trailing \r\n */; char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), fProfileAndLevelIndication); char* endPtr = &fmtp[strlen(fmtp)]; for (unsigned i = 0; i < configLength; ++i) { sprintf(endPtr, "%02X", config[i]); endPtr += 2; } sprintf(endPtr, "\r\n"); delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(fmtp); delete[] fmtp; return fFmtpSDPLine; }
// Generates a fresh "a=fmtp:" SDP attribute line for this MPEG-4 video stream
// on each call, from the framer source's profile/level indication and 'config'
// bytes. Returns NULL while the source or its parameters are not yet
// available; otherwise a line owned by this object (cached in fAuxSDPLine).
char const* MPEG4ESVideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using parameters from
  // our framer source (in case they've changed since the last time that
  // we were called):
  MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource;
  if (framerSource == NULL) return NULL; // we don't yet have a source

  u_int8_t profile_level_id = framerSource->profile_and_level_indication();
  if (profile_level_id == 0) return NULL; // our source isn't ready

  unsigned configLength;
  unsigned char* config = framerSource->getConfigBytes(configLength);
  if (config == NULL) return NULL; // our source isn't ready

  // "config=" is completed below by appending the config bytes in hex:
  char const* fmtpFmt =
    "a=fmtp:%d "
    "profile-level-id=%d;"
    "config=";
  // Buffer size: the "%..." specifiers over-count the output, which also
  // covers the trailing NUL:
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + 3 /* max char len */
    + 2*configLength /* 2*, because each byte prints as 2 chars */
    + 2 /* trailing \r\n */;
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt, rtpPayloadType(), profile_level_id);
  char* endPtr = &fmtp[strlen(fmtp)];
  // Append each config byte as two upper-case hex digits:
  for (unsigned i = 0; i < configLength; ++i) {
    sprintf(endPtr, "%02X", config[i]);
    endPtr += 2;
  }
  sprintf(endPtr, "\r\n");

  delete[] fAuxSDPLine; // free any previously-generated line
  fAuxSDPLine = strDup(fmtp);
  delete[] fmtp;
  return fAuxSDPLine;
}
// Generates a fresh "a=fmtp:" SDP attribute line for this H.265 stream on each
// call, built from the VPS/SPS/PPS NAL units (either our own stored copies, or
// ones fetched from the framer source). Returns NULL while the parameter sets
// are not yet available; otherwise a line owned by this object.
char const* H265VideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using our VPS, SPS and PPS (if we have them),
  // otherwise parameters from our framer source (in case they've changed since the last time that
  // we were called):
  H264or5VideoStreamFramer* framerSource = NULL;
  u_int8_t* vps = fVPS; unsigned vpsSize = fVPSSize;
  u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
  u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
  if (vps == NULL || sps == NULL || pps == NULL) {
    // We need to get VPS, SPS and PPS from our framer source:
    if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source)
    framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
    if (framerSource == NULL) return NULL; // we don't yet have a source

    framerSource->getVPSandSPSandPPS(vps, vpsSize, sps, spsSize, pps, ppsSize);
    if (vps == NULL || sps == NULL || pps == NULL) {
      return NULL; // our source isn't ready
    }
  }

  // Set up the "a=fmtp:" SDP line for this stream.
  // The profile/tier/level fields come from the VPS's 'profile_tier_level'
  // structure, so first strip emulation-prevention bytes from the VPS:
  u_int8_t* vpsWEB = new u_int8_t[vpsSize]; // "WEB" means "Without Emulation Bytes"
  unsigned vpsWEBSize = removeH264or5EmulationBytes(vpsWEB, vpsSize, vps, vpsSize);
  if (vpsWEBSize < 6/*'profile_tier_level' offset*/ + 12/*num 'profile_tier_level' bytes*/) {
    // Bad VPS size => assume our source isn't ready
    delete[] vpsWEB;
    return NULL;
  }
  // Extract the fields required by the RFC 7798 "a=fmtp:" parameters:
  u_int8_t const* profileTierLevelHeaderBytes = &vpsWEB[6];
  unsigned profileSpace = profileTierLevelHeaderBytes[0]>>6; // general_profile_space
  unsigned profileId = profileTierLevelHeaderBytes[0]&0x1F; // general_profile_idc
  unsigned tierFlag = (profileTierLevelHeaderBytes[0]>>5)&0x1; // general_tier_flag
  unsigned levelId = profileTierLevelHeaderBytes[11]; // general_level_idc
  u_int8_t const* interop_constraints = &profileTierLevelHeaderBytes[5];
  char interopConstraintsStr[100];
  // Six constraint bytes, printed as 12 upper-case hex chars:
  sprintf(interopConstraintsStr, "%02X%02X%02X%02X%02X%02X",
	  interop_constraints[0], interop_constraints[1], interop_constraints[2],
	  interop_constraints[3], interop_constraints[4], interop_constraints[5]);
  delete[] vpsWEB;

  // sprop-vps/sps/pps carry the Base64-encoded parameter-set NAL units:
  char* sprop_vps = base64Encode((char*)vps, vpsSize);
  char* sprop_sps = base64Encode((char*)sps, spsSize);
  char* sprop_pps = base64Encode((char*)pps, ppsSize);

  char const* fmtpFmt =
    "a=fmtp:%d profile-space=%u"
    ";profile-id=%u"
    ";tier-flag=%u"
    ";level-id=%u"
    ";interop-constraints=%s"
    ";sprop-vps=%s"
    ";sprop-sps=%s"
    ";sprop-pps=%s\r\n";
  // Buffer size: the format string's "%..." specifiers over-count the output,
  // which also covers the trailing NUL:
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max num chars: rtpPayloadType */ + 20 /* max num chars: profile_space */
    + 20 /* max num chars: profile_id */ + 20 /* max num chars: tier_flag */
    + 20 /* max num chars: level_id */ + strlen(interopConstraintsStr)
    + strlen(sprop_vps) + strlen(sprop_sps) + strlen(sprop_pps);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
	  rtpPayloadType(), profileSpace, profileId, tierFlag, levelId,
	  interopConstraintsStr, sprop_vps, sprop_sps, sprop_pps);

  delete[] sprop_vps; delete[] sprop_sps; delete[] sprop_pps;

  delete[] fFmtpSDPLine; // free any previously-generated line
  fFmtpSDPLine = fmtp;
  return fFmtpSDPLine;
}