/* Creates a TS muxer for the given destination: allocates a libavformat
 * output context (falling back to the "mpegts" format), sets up the audio and
 * video streams from the GF_AVRedirect codecs, opens the codecs and starts
 * the interleaving thread. Returns NULL on failure. */
GF_AbstractTSMuxer * ts_amux_new(GF_AVRedirect * avr, u32 videoBitrateInBitsPerSec, u32 width, u32 height, u32 audioBitRateInBitsPerSec) {
	GF_AbstractTSMuxer * ts = gf_malloc( sizeof(GF_AbstractTSMuxer));
	memset( ts, 0, sizeof( GF_AbstractTSMuxer));
	ts->oc = avformat_alloc_context();
	ts->destination = avr->destination;
	av_register_all();
	ts->oc->oformat = GUESS_FORMAT(NULL, avr->destination, NULL);
	if (!ts->oc->oformat)
		ts->oc->oformat = GUESS_FORMAT("mpegts", NULL, NULL);
	assert( ts->oc->oformat);
#if REDIRECT_AV_AUDIO_ENABLED
	ts->audio_st = av_new_stream(ts->oc, avr->audioCodec->id);
	{
		AVCodecContext * c = ts->audio_st->codec;
		c->codec_id = avr->audioCodec->id;
		c->codec_type = AVMEDIA_TYPE_AUDIO;
		/* put sample parameters */
		c->sample_fmt = SAMPLE_FMT_S16;
		c->bit_rate = audioBitRateInBitsPerSec;
		c->sample_rate = avr->audioSampleRate;
		c->channels = 2;
		c->time_base.num = 1;
		c->time_base.den = 1000;
		// some formats want stream headers to be separate
		if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
#endif

	ts->video_st = av_new_stream(ts->oc, avr->videoCodec->id);
	{
		AVCodecContext * c = ts->video_st->codec;
		c->codec_id = avr->videoCodec->id;
		c->codec_type = AVMEDIA_TYPE_VIDEO;

		/* put sample parameters */
		c->bit_rate = videoBitrateInBitsPerSec;
		/* resolution must be a multiple of two */
		c->width = width;
		c->height = height;
		/* time base: this is the fundamental unit of time (in seconds) in terms
		   of which frame timestamps are represented. for fixed-fps content,
		   timebase should be 1/framerate and timestamp increments should be
		   identically 1. */
		c->time_base.den = STREAM_FRAME_RATE;
		c->time_base.num = 1;
		c->gop_size = 12; /* emit one intra frame every twelve frames at most */
		c->pix_fmt = STREAM_PIX_FMT;
		if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
			/* just for testing, we also add B frames */
			c->max_b_frames = 2;
		}
		if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
			/* Needed to avoid using macroblocks in which some coeffs overflow.
			   This does not happen with normal video, it just happens here as
			   the motion of the chroma plane does not match the luma plane. */
			c->mb_decision=2;
		}
		// some formats want stream headers to be separate
		if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;

	}
	//av_set_pts_info(ts->audio_st, 33, 1, audioBitRateInBitsPerSec);

#ifndef AVIO_FLAG_WRITE
	/* set the output parameters (must be done even if there are none) */
	if (av_set_parameters(ts->oc, NULL) < 0) {
		fprintf(stderr, "Invalid output format parameters\n");
		return NULL;
	}
#endif

	dump_format(ts->oc, 0, avr->destination, 1);
	GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] DUMPING to %s...\n", ts->destination));

#if (LIBAVCODEC_VERSION_MAJOR<55)
	if (avcodec_open(ts->video_st->codec, avr->videoCodec) < 0) {
#else
	if (avcodec_open2(ts->video_st->codec, avr->videoCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open video codec\n"));
		return NULL;
	}
#if REDIRECT_AV_AUDIO_ENABLED
#if (LIBAVCODEC_VERSION_MAJOR<55)
	if (avcodec_open(ts->audio_st->codec, avr->audioCodec) < 0) {
#else
	if (avcodec_open2(ts->audio_st->codec, avr->audioCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open audio codec\n"));
		return NULL;
	}
	ts->audioMx = gf_mx_new("TS_AudioMx");
#endif
	ts->videoMx = gf_mx_new("TS_VideoMx");
	ts->tsEncodingThread = gf_th_new("ts_interleave_thread_run");
	ts->encode = 1;
	ts->audioPackets = NULL;
	ts->videoPackets = NULL;
	gf_th_run(ts->tsEncodingThread, ts_interleave_thread_run, ts);
	return ts;
}
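
/*
 * Illustrative caller-side sketch (not part of the module): it assumes an
 * already-initialized GF_AVRedirect (destination, videoCodec, audioCodec and
 * audioSampleRate set), already-encoded sample buffers, and that the module
 * header declaring ts_encode_audio_frame() / ts_encode_video_frame() /
 * ts_amux_del() is in scope. Queued buffers are not copied, so they must stay
 * valid until the interleaving thread has written them; a real session would
 * queue many frames before tearing the muxer down.
 */
static void example_mux_one_av_pair(GF_AVRedirect *avr,
                                    uint8_t *encoded_video, int video_size,
                                    uint8_t *encoded_audio, int audio_size,
                                    u64 audio_pts)
{
	/* 400 kbps video at 640x480 and 96 kbps audio are arbitrary example values */
	GF_AbstractTSMuxer *mux = ts_amux_new(avr, 400000, 640, 480, 96000);
	if (!mux) return;
	ts_encode_video_frame(mux, encoded_video, video_size);
	ts_encode_audio_frame(mux, encoded_audio, audio_size, audio_pts);
	/* stops the interleaving thread and frees the libavformat context */
	ts_amux_del(mux);
}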

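/* Stops encoding, terminates the interleaving thread, closes the codec
 * contexts and releases the GPAC mutexes and libavformat output context
 * created by ts_amux_new(). */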
void ts_amux_del(GF_AbstractTSMuxer * muxerToDelete) {
	if (!muxerToDelete)
		return;
	muxerToDelete->encode = 0;
	gf_sleep(100);
	gf_th_stop(muxerToDelete->tsEncodingThread);
	muxerToDelete->tsEncodingThread = NULL;
#if REDIRECT_AV_AUDIO_ENABLED
	gf_mx_del(muxerToDelete->audioMx);
	muxerToDelete->audioMx = NULL;
#endif
	gf_mx_del(muxerToDelete->videoMx);
	muxerToDelete->videoMx = NULL;
	if (muxerToDelete->video_st) {
		avcodec_close(muxerToDelete->video_st->codec);
		muxerToDelete->video_st = NULL;
	}
#if REDIRECT_AV_AUDIO_ENABLED
	if (muxerToDelete->audio_st) {
		avcodec_close(muxerToDelete->audio_st->codec);
		muxerToDelete->audio_st = NULL;
	}
#endif
	/* Write the trailer, if any. The trailer must be written before the
	 * codec contexts that were open when the header was written are closed;
	 * otherwise av_write_trailer() may try to use memory freed by
	 * avcodec_close(). */
	if (muxerToDelete->oc) {
		u32 i;
		/* free the streams */
		for (i = 0; i < muxerToDelete->oc->nb_streams; i++) {
			av_freep(&muxerToDelete->oc->streams[i]->codec);
			av_freep(&muxerToDelete->oc->streams[i]);
		}

		/* free the output format context itself */
		av_free(muxerToDelete->oc);
		muxerToDelete->oc = NULL;
	}
}

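/* Wraps an already-encoded audio access unit into an AVPacket and appends it
 * to the audio packet queue drained by the interleaving thread. Returns 1 when
 * encoding is stopped (the frame is dropped), 0 once the packet is queued. */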
Bool ts_encode_audio_frame(GF_AbstractTSMuxer * ts, uint8_t * data, int encoded, u64 pts) {
	AVPacketList *pl;
	AVPacket * pkt;
	if (!ts->encode)
		return 1;
	pl = gf_malloc(sizeof(AVPacketList));
	pl->next = NULL;
	pkt = &(pl->pkt);
	av_init_packet(pkt);
	assert( ts->audio_st);
	assert( ts->audio_st->codec);
	pkt->flags = 0;
	if (ts->audio_st->codec->coded_frame) {
		if (ts->audio_st->codec->coded_frame->key_frame)
			pkt->flags = AV_PKT_FLAG_KEY;
		if (ts->audio_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
			pkt->pts = av_rescale_q(ts->audio_st->codec->coded_frame->pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
		} else {
			if (pts == AV_NOPTS_VALUE)
				pkt->pts = AV_NOPTS_VALUE;
			else {
				pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
			}
		}
	} else {
		if (pts == AV_NOPTS_VALUE)
			pkt->pts = AV_NOPTS_VALUE;
		else
			pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
	}
	pkt->stream_index= ts->audio_st->index;
	pkt->data = data;
	pkt->size = encoded;
	//fprintf(stderr, "AUDIO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, pts, pl);
	gf_mx_p(ts->audioMx);
	if (!ts->audioPackets)
		ts->audioPackets = pl;
	else {
		AVPacketList * px = ts->audioPackets;
		while (px->next)
			px = px->next;
		px->next = pl;
	}
	gf_mx_v(ts->audioMx);
	return 0;
}

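/* Video counterpart of ts_encode_audio_frame(): queues an already-encoded
 * video access unit, deriving its PTS and key-frame flag from the codec's
 * coded_frame. */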
Bool ts_encode_video_frame(GF_AbstractTSMuxer* ts, uint8_t* data, int encoded) {
	AVPacketList *pl;
	AVPacket * pkt;
	if (!ts->encode)
		return 1;
	pl = gf_malloc(sizeof(AVPacketList));
	pl->next = NULL;
	pkt = &(pl->pkt);

	av_init_packet(pkt);

	if (ts->video_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
		//pkt->pts= av_rescale_q(ts->video_st->codec->coded_frame->pts, ts->video_st->codec->time_base, ts->video_st->time_base);
		pkt->pts = ts->video_st->codec->coded_frame->pts * ts->video_st->time_base.den / ts->video_st->time_base.num / 1000;
		//pkt->pts = ts->video_st->codec->coded_frame->pts;
	}
	if (ts->video_st->codec->coded_frame->key_frame)
		pkt->flags |= AV_PKT_FLAG_KEY;
	pkt->stream_index= ts->video_st->index;
	pkt->data= data;
	pkt->size= encoded;
	//fprintf(stderr, "VIDEO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, ts->video_st->codec->coded_frame->pts, pl);
	gf_mx_p(ts->videoMx);
	if (!ts->videoPackets)
		ts->videoPackets = pl;
	else {
		AVPacketList * px = ts->videoPackets;
		while (px->next)
			px = px->next;
		px->next = pl;
	}
	gf_mx_v(ts->videoMx);
	return 0;
}
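
/*
 * ts_interleave_thread_run() is referenced above but not part of this excerpt.
 * The sketch below is only a hypothetical illustration of how such a thread
 * could drain one of the packet queues, to make the role of videoPackets and
 * videoMx explicit; it is not the module's actual implementation, and it
 * assumes the output has already been opened and the header written.
 */
static u32 example_drain_video_queue(void *param) {
	GF_AbstractTSMuxer *ts = (GF_AbstractTSMuxer *) param;
	while (ts->encode) {
		AVPacketList *pl;
		/* pop the head of the queue under the mutex */
		gf_mx_p(ts->videoMx);
		pl = ts->videoPackets;
		if (pl) ts->videoPackets = pl->next;
		gf_mx_v(ts->videoMx);
		if (!pl) {
			gf_sleep(1); /* nothing queued yet */
			continue;
		}
		/* hand the packet to libavformat for interleaving and output */
		av_interleaved_write_frame(ts->oc, &pl->pkt);
		gf_free(pl);
	}
	return 0;
}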