Example #1
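// FLV demuxer callback: the AVCC/HVCC/ASC sequence headers create the DASH video/audio
// adaptation sets once, and subsequent H.264/H.265/AAC frames are forwarded to the MPD writer.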
static int dash_live_onflv(void* param, int codec, const void* data, size_t bytes, uint32_t pts, uint32_t dts, int flags)
{
    struct mpeg4_aac_t aac;
    struct mpeg4_avc_t avc;
    struct mpeg4_hevc_t hevc;
    dash_playlist_t* dash = (dash_playlist_t*)param;

    switch (codec)
    {
    case FLV_VIDEO_AVCC:
        if (-1 == dash->adapation_video && mpeg4_avc_decoder_configuration_record_load((const uint8_t*)data, bytes, &avc) > 0)
            dash->adapation_video = dash_mpd_add_video_adaptation_set(dash->mpd, dash->name.c_str(), MOV_OBJECT_H264, dash->width, dash->height, data, bytes);
        break;

    case FLV_VIDEO_HVCC:
        if (-1 == dash->adapation_video && mpeg4_hevc_decoder_configuration_record_load((const uint8_t*)data, bytes, &hevc) > 0)
            dash->adapation_video = dash_mpd_add_video_adaptation_set(dash->mpd, dash->name.c_str(), MOV_OBJECT_HEVC, dash->width, dash->height, data, bytes);
        break;

    case FLV_AUDIO_ASC:
        if (-1 == dash->adapation_audio && mpeg4_aac_audio_specific_config_load((const uint8_t*)data, bytes, &aac) > 0)
        {
            int rate = mpeg4_aac_audio_frequency_to((enum mpeg4_aac_frequency)aac.sampling_frequency_index);
            dash->adapation_audio = dash_mpd_add_audio_adaptation_set(dash->mpd, dash->name.c_str(), MOV_OBJECT_AAC, aac.channel_configuration, 32, rate, data, bytes);
        }
        break;

    case FLV_AUDIO_AAC:
        return dash_mpd_input(dash->mpd, dash->adapation_audio, data, bytes, pts, dts, 0);

    case FLV_VIDEO_H264:
    case FLV_VIDEO_H265:
        return dash_mpd_input(dash->mpd, dash->adapation_video, data, bytes, pts, dts, flags ? MOV_AV_FLAG_KEYFREAME : 0);

    default:
        assert(0);
    }
    return 0;
}
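The handler above has the flv_demuxer_handler shape used by the same project's libflv. A minimal sketch of how it might be driven, assuming the flv_demuxer_create / flv_demuxer_input / flv_demuxer_destroy API and a hypothetical read_flv_tag() helper (not part of the original example) that supplies raw FLV tags:

#include "flv-demuxer.h" // libflv, same project as the dash_mpd_* calls above

// Hypothetical helper: fills one FLV tag (type, timestamp, payload) into buffer,
// stores its size in *taglen and returns > 0 while tags remain.
int read_flv_tag(int* type, uint32_t* timestamp, void* buffer, size_t capacity, size_t* taglen);

static int dash_live_demux(dash_playlist_t* dash)
{
    static uint8_t packet[2 * 1024 * 1024];
    int type;
    uint32_t timestamp;
    size_t taglen;

    // Every tag fed to flv_demuxer_input is parsed and handed back to dash_live_onflv,
    // once per sequence header or elementary frame.
    flv_demuxer_t* demuxer = flv_demuxer_create(dash_live_onflv, dash);
    while (read_flv_tag(&type, &timestamp, packet, sizeof(packet), &taglen) > 0)
    {
        if (0 != flv_demuxer_input(demuxer, type, packet, taglen, timestamp))
            break;
    }
    flv_demuxer_destroy(demuxer);
    return 0;
}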
Example #2
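// MP4 reader audio-track callback: registers a media_t entry for the AAC track and builds its
// SDP media description, either MP4A-LATM (RFC 6416) or MPEG4-GENERIC (RFC 3640, AAC-hbr mode).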
void MP4FileSource::MP4OnAudio(void* param, uint32_t track, uint8_t object, int channel_count, int /*bit_per_sample*/, int sample_rate, const void* extra, size_t bytes)
{
	int n = 0;
	MP4FileSource* self = (MP4FileSource*)param;
	struct media_t* m = &self->m_media[self->m_count++];
	m->track = track;
	m->rtcp_clock = 0;
	m->ssrc = (uint32_t)rtp_ssrc();
	m->timestamp = m->ssrc;
	m->bandwidth = 128 * 1024;
	m->dts_first = -1;
	m->dts_last = -1;

	if (MOV_OBJECT_AAC == object)
	{
		struct mpeg4_aac_t aac;
		//aac.profile = MPEG4_AAC_LC;
		//aac.channel_configuration = (uint8_t)channel_count;
		//aac.sampling_frequency_index = (uint8_t)mpeg4_aac_audio_frequency_from(sample_rate);
		mpeg4_aac_audio_specific_config_load((const uint8_t*)extra, bytes, &aac);
		assert(aac.sampling_frequency_index == (uint8_t)mpeg4_aac_audio_frequency_from(sample_rate));
		assert(aac.channel_configuration == channel_count);

		if (1)
		{
			// RFC 6416
			// In the presence of SBR, the sampling rates for the core encoder/
			// decoder and the SBR tool are different in most cases. Therefore,
			// this parameter SHALL NOT be considered as the definitive sampling rate.
			static const char* pattern =
				"m=audio 0 RTP/AVP %d\n"
				"a=rtpmap:%d MP4A-LATM/%d/%d\n"
				"a=fmtp:%d profile-level-id=%d;object=%d;cpresent=0;config=";

			sample_rate = 90000;
			n = snprintf((char*)self->m_frame.buffer, sizeof(self->m_frame.buffer), pattern,
				RTP_PAYLOAD_MP4A, RTP_PAYLOAD_MP4A, sample_rate, channel_count, 
				RTP_PAYLOAD_MP4A, mpeg4_aac_profile_level(&aac), aac.profile);

			uint8_t config[6];
			int r = mpeg4_aac_stream_mux_config_save(&aac, config, sizeof(config));
			static const char* hex = "0123456789abcdef";
			for (int i = 0; i < r; i++)
			{
				self->m_frame.buffer[n++] = hex[config[i] >> 4];
				self->m_frame.buffer[n++] = hex[config[i] & 0x0F];
			}
			self->m_frame.buffer[n] = '\0';

			snprintf(m->name, sizeof(m->name), "%s", "MP4A-LATM");
		}
		else
		{
			// RFC 3640 3.3.1. General (p21)
			// a=rtpmap:<payload type> <encoding name>/<clock rate>[/<encoding parameters > ]
			// For audio streams, <encoding parameters> specifies the number of audio channels
			// streamType: AudioStream
			// When using SDP, the clock rate of the RTP time stamp MUST be expressed using the "rtpmap" attribute. 
			// If an MPEG-4 audio stream is transported, the rate SHOULD be set to the same value as the sampling rate of the audio stream. 
			// If an MPEG-4 video stream is transported, it is RECOMMENDED that the rate be set to 90 kHz.
			static const char* pattern =
				"m=audio 0 RTP/AVP %d\n"
				"a=rtpmap:%d MPEG4-GENERIC/%d/%d\n"
				"a=fmtp:%d streamType=5;profile-level-id=1;mode=AAC-hbr;sizelength=13;indexlength=3;indexdeltalength=3;config=";

			n = snprintf((char*)self->m_frame.buffer, sizeof(self->m_frame.buffer), pattern,
				RTP_PAYLOAD_MP4A, RTP_PAYLOAD_MP4A, sample_rate, channel_count, RTP_PAYLOAD_MP4A);

			// For MPEG-4 Audio streams, config is the audio object type specific
			// decoder configuration data AudioSpecificConfig()
			n += base64_encode((char*)self->m_frame.buffer + n, extra, bytes);
			self->m_frame.buffer[n] = '\0';

			snprintf(m->name, sizeof(m->name), "%s", "MPEG4-GENERIC");
		}

		m->frequency = sample_rate;
		m->payload = RTP_PAYLOAD_MP4A;
		self->m_frame.buffer[n++] = '\n';
	}
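With the LATM branch active (the if (1) above), the SDP media section written into m_frame.buffer takes roughly this shape for a 44.1 kHz stereo AAC-LC track; payload type 97 is assumed here for RTP_PAYLOAD_MP4A, and the actual profile-level-id and config values come from mpeg4_aac_profile_level() and the hex-encoded StreamMuxConfig produced by mpeg4_aac_stream_mux_config_save():

m=audio 0 RTP/AVP 97
a=rtpmap:97 MP4A-LATM/90000/2
a=fmtp:97 profile-level-id=<level>;object=2;cpresent=0;config=<hex StreamMuxConfig>

Note that m->frequency is assigned from sample_rate after it has been forced to 90000, so the RTP clock for this track is 90 kHz rather than the codec sampling rate, in line with the RFC 6416 caveat quoted in the comment.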