Example #1
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
  int ret;
  static struct SwsContext *sws_ctx;
  AVCodecContext *c = st->codec;

  if (frame_count >= STREAM_NB_FRAMES) {
    /* No more frames to compress. The codec has a latency of a few
     * frames if using B-frames, so we get the last frames by
     * passing the same picture again. */
  } else {
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
      /* as we only generate a YUV420P picture, we must convert it
       * to the codec pixel format if needed */
      if (!sws_ctx) {
        sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                 c->width, c->height, c->pix_fmt,
                                 sws_flags, NULL, NULL, NULL);
        if (!sws_ctx) {
          fprintf(stderr,
                  "Could not initialize the conversion context\n");
          exit(1);
        }
      }
      fill_yuv_image(&src_picture, frame_count, c->width, c->height);
      sws_scale(sws_ctx,
                (const uint8_t * const *)src_picture.data, src_picture.linesize,
                0, c->height, dst_picture.data, dst_picture.linesize);
    } else {
      fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
    }
  }

  if (oc->oformat->flags & AVFMT_RAWPICTURE) {
    /* Raw video case - directly store the picture in the packet */
    AVPacket pkt;
    av_init_packet(&pkt);

    pkt.flags        |= AV_PKT_FLAG_KEY;
    pkt.stream_index  = st->index;
    pkt.data          = dst_picture.data[0];
    pkt.size          = sizeof(AVPicture);

    ret = av_interleaved_write_frame(oc, &pkt);
  } else {
    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
      fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
      exit(1);
    }
    /* If size is zero, it means the image was buffered. */

    if (!ret && got_packet && pkt.size) {
      pkt.stream_index = st->index;

      /* Write the compressed frame to the media file. */
      ret = av_interleaved_write_frame(oc, &pkt);
    } else {
      ret = 0;
    }
  }
  if (ret != 0) {
    fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
    exit(1);
  }
  frame_count++;
}
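
Note: avcodec_encode_video2() is deprecated in FFmpeg 3.1+ in favor of the
send/receive API. A minimal sketch of the replacement loop (a hypothetical
helper, not part of the original example):

static int encode_and_write(AVFormatContext *oc, AVStream *st,
                            AVCodecContext *c, AVFrame *frame)
{
  /* Passing frame == NULL puts the encoder into flush mode. */
  int ret = avcodec_send_frame(c, frame);
  if (ret < 0)
    return ret;
  for (;;) {
    AVPacket pkt = { 0 };
    ret = avcodec_receive_packet(c, &pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
      return 0; /* needs more input, or fully drained */
    if (ret < 0)
      return ret;
    av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
    pkt.stream_index = st->index;
    ret = av_interleaved_write_frame(oc, &pkt); /* takes ownership of pkt */
    if (ret < 0)
      return ret;
  }
}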
Example #2
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
  AVCodecContext *c;
  AVPacket pkt = { 0 }; // data and size must be 0;
  AVFrame *frame = avcodec_alloc_frame();
  int got_packet, ret, dst_nb_samples;

  av_init_packet(&pkt);
  c = st->codec;

  get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

  /* convert samples from native format to destination codec format, using the resampler */
  if (swr_ctx) {
    /* compute destination number of samples */
    dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                    c->sample_rate, c->sample_rate, AV_ROUND_UP);
    if (dst_nb_samples > max_dst_nb_samples) {
      av_free(dst_samples_data[0]);
      ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                             dst_nb_samples, c->sample_fmt, 0);
      if (ret < 0)
        exit(1);
      max_dst_nb_samples = dst_nb_samples;
      dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                    c->sample_fmt, 0);
    }

    /* convert to destination format */
    ret = swr_convert(swr_ctx,
                      dst_samples_data, dst_nb_samples,
                      (const uint8_t **)src_samples_data, src_nb_samples);
    if (ret < 0) {
      fprintf(stderr, "Error while converting\n");
      exit(1);
    }
  } else {
    dst_samples_data[0] = src_samples_data[0];
    dst_nb_samples = src_nb_samples;
  }

  frame->nb_samples = dst_nb_samples;
  avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                           dst_samples_data[0], dst_samples_size, 0);

  ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
  if (ret < 0) {
    fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
    exit(1);
  }

  if (!got_packet) {
    avcodec_free_frame(&frame); /* avoid leaking the frame on early return */
    return;
  }

  pkt.stream_index = st->index;

  /* Write the compressed frame to the media file. */
  ret = av_interleaved_write_frame(oc, &pkt);
  if (ret != 0) {
    fprintf(stderr, "Error while writing audio frame: %s\n",
            av_err2str(ret));
    exit(1);
  }
  avcodec_free_frame(&frame);
}
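
Note: the dst_nb_samples computation above passes c->sample_rate as both the
source and destination rate because this example generates audio directly at
the codec's rate. When the rates differ, the usual pattern rescales through
the resampler delay (a sketch; src_rate and dst_rate stand in for the real
rates):

dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) + src_nb_samples,
                                dst_rate, src_rate, AV_ROUND_UP);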
Example #3
bool StAVVideoMuxer::save(const StString& theFile) {
    if(myCtxListSrc.isEmpty()
    || theFile.isEmpty()) {
        return false;
    }

    StString aFormatName = myCtxListSrc[0]->iformat->name;
    const char* aFormatStr = formatToMetadata(myStereoFormat);

    std::vector<StRemuxContext> aSrcCtxList;
    //StArrayList<StRemuxContext> aSrcCtxList;
    unsigned int aStreamCount = 0;

    StAVOutContext aCtxOut;
    if(!aCtxOut.findFormat(NULL, theFile.toCString())) {
        signals.onError(StString("Unable to find a suitable output format for '") + theFile + "'.");
        return false;
    } else if(!aCtxOut.create(theFile)) {
        signals.onError(StString("Could not create output context."));
        return false;
    }

    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext aCtxSrc;
        aCtxSrc.Context = myCtxListSrc[aCtxId];
        if(aCtxId == 0) {
            av_dict_copy(&aCtxOut.Context->metadata, aCtxSrc.Context->metadata, AV_DICT_DONT_OVERWRITE);
            av_dict_set(&aCtxOut.Context->metadata, "STEREO_MODE", aFormatStr, 0);
        }
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            aCtxSrc.Streams.add((unsigned int )-1);
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if(addStream(aCtxOut.Context, aStreamSrc)) {
                    aCtxSrc.Streams[aStreamId] = aStreamCount++;
                }
            }
        }
        aSrcCtxList.push_back(aCtxSrc);
    }

    // add audio streams after video
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    // add other streams (subtitles) at the end
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type != AVMEDIA_TYPE_VIDEO
            && aStreamSrc->codec->codec_type != AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    av_dump_format(aCtxOut.Context, 0, theFile.toCString(), 1);
    if(!(aCtxOut.Context->oformat->flags & AVFMT_NOFILE)) {
        const int aState = avio_open2(&aCtxOut.Context->pb, theFile.toCString(), AVIO_FLAG_WRITE, NULL, NULL);
        if(aState < 0) {
            signals.onError(StString("Could not open output file '") + theFile + "' (" + stAV::getAVErrorDescription(aState) + ")");
            return false;
        }
    }

    int aState = avformat_write_header(aCtxOut.Context, NULL);
    if(aState < 0) {
        signals.onError(StString("Error occurred when opening output file (") + stAV::getAVErrorDescription(aState) + ").");
        return false;
    }

    AVPacket aPacket;
    for(;;) {
        size_t aNbEmpty = 0;
        for(size_t aCtxId = 0; aCtxId < aSrcCtxList.size(); ++aCtxId) {
            StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
            if(!aCtxSrc.State) {
                ++aNbEmpty;
                continue;
            }

            if(av_read_frame(aCtxSrc.Context, &aPacket) < 0) {
                aCtxSrc.State = false;
                ++aNbEmpty;
                continue;
            }

            unsigned int aStreamOutIndex = aCtxSrc.Streams[aPacket.stream_index];
            if(aStreamOutIndex == (unsigned int )-1) {
                continue;
            }

            AVStream* aStreamIn  = aCtxSrc.Context->streams[aPacket.stream_index];
            AVStream* aStreamOut = aCtxOut.Context->streams[aStreamOutIndex];

        #ifdef ST_LIBAV_FORK
            const AVRounding aRoundParams = AV_ROUND_NEAR_INF;
        #else
            const AVRounding aRoundParams = AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        #endif
            aPacket.pts      = av_rescale_q_rnd(aPacket.pts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.dts      = av_rescale_q_rnd(aPacket.dts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.duration = static_cast<int >(av_rescale_q(aPacket.duration, aStreamIn->time_base, aStreamOut->time_base));
            aPacket.pos      = -1;

            aState = av_interleaved_write_frame(aCtxOut.Context, &aPacket);
            if(aState < 0) {
                signals.onError(StString("Error muxing packet (") + stAV::getAVErrorDescription(aState) + ").");
                return false;
            }
            av_free_packet(&aPacket);
        }
        if(aNbEmpty == aSrcCtxList.size()) {
            break;
        }
    }
    av_write_trailer(aCtxOut.Context);
    return true;
}
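
Note: where av_packet_rescale_ts() is available, the three per-field
conversions above collapse into a single call that applies the same
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX rounding internally (a sketch):

av_packet_rescale_ts(&aPacket, aStreamIn->time_base, aStreamOut->time_base);
aPacket.pos = -1;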
Example #4
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt_a = NULL,*ofmt_v = NULL;
	//(Input AVFormatContext and Output AVFormatContext)
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt;
	int ret, i;
	int videoindex=-1,audioindex=-1;
	int frame_index=0;

	const char *in_filename  = "cuc_ieschool.ts";//Input file URL
	//char *in_filename  = "cuc_ieschool.mkv";
	const char *out_filename_v = "cuc_ieschool.h264";//Output file URL
	//char *out_filename_a = "cuc_ieschool.mp3";
	const char *out_filename_a = "cuc_ieschool.aac";

	av_register_all();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		goto end;
	}

	//Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;

	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
			//Create output AVStream according to input AVStream
			AVFormatContext *ofmt_ctx;
			AVStream *in_stream = ifmt_ctx->streams[i];
			AVStream *out_stream = NULL;
			
			if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
				videoindex=i;
				out_stream=avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec);
				ofmt_ctx=ofmt_ctx_v;
			}else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
				audioindex=i;
				out_stream=avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec);
				ofmt_ctx=ofmt_ctx_a;
			}else{
				// skip any non-A/V stream instead of aborting stream setup
				continue;
			}
			
			if (!out_stream) {
				printf( "Failed allocating output stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			//Copy the settings of AVCodecContext
			if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
				printf( "Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out_stream->codec->codec_tag = 0;

			if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
				out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	//Dump Format------------------
	printf("\n==============Input Video=============\n");
	av_dump_format(ifmt_ctx, 0, in_filename, 0);
	printf("\n==============Output Video============\n");
	av_dump_format(ofmt_ctx_v, 0, out_filename_v, 1);
	printf("\n==============Output Audio============\n");
	av_dump_format(ofmt_ctx_a, 0, out_filename_a, 1);
	printf("\n======================================\n");
	//Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}

	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	//Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf( "Error occurred when opening video output file\n");
		goto end;
	}
	if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
		printf( "Error occurred when opening audio output file\n");
		goto end;
	}
	
#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb"); 
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream  = ifmt_ctx->streams[pkt.stream_index];

		
		if(pkt.stream_index==videoindex){
			out_stream = ofmt_ctx_v->streams[0];
			ofmt_ctx=ofmt_ctx_v;
			printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		}else if(pkt.stream_index==audioindex){
			out_stream = ofmt_ctx_a->streams[0];
			ofmt_ctx=ofmt_ctx_a;
			printf("Write Audio Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
		}else{
			continue;
		}


		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		pkt.stream_index=0;
		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			printf( "Error muxing packet\n");
			break;
		}
		//printf("Write %8d frames to output file\n",frame_index);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);  
#endif

	//Write file trailer
	av_write_trailer(ofmt_ctx_a);
	av_write_trailer(ofmt_ctx_v);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);

	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);

	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);


	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
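
Note: the av_bitstream_filter_*() API guarded by USE_H264BSF above was
removed from newer FFmpeg; Example #9 below shows the replacement AVBSF API
inside a real muxer. A minimal sketch of h264_mp4toannexb with the new API
(error checks omitted):

const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
AVBSFContext *bsf = NULL;
av_bsf_alloc(f, &bsf);
avcodec_parameters_copy(bsf->par_in, in_stream->codecpar);
av_bsf_init(bsf);
/* per packet: */
av_bsf_send_packet(bsf, &pkt);
while (av_bsf_receive_packet(bsf, &pkt) == 0) {
    /* mux the filtered pkt here */
}
av_bsf_free(&bsf);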
Example #5
int main(int argc, char* argv[]) {
    if (argc < 2) {
        printf("Usage: %s filename\n", argv[0]);
        return 0;
    }
    f = fopen("002.avi", "wb");
    if (signal(SIGINT, mysigint) == SIG_ERR)
       printf("Cannot handle SIGINT!\n");
    //if (signal(SIGHUP, mysighup) == SIG_ERR)
    //   printf("Cannot handle SIGHUP!\n");
    //if (signal(SIGTERM, mysigterm) == SIG_ERR)
    //   printf("Cannot handle SIGTERM!\n");

    /* can SIGKILL be handled by our own function? */
    //if (signal(SIGKILL, mysigkill) == SIG_ERR)
    //   printf("Cannot handle SIGKILL!\n");

    // Register all available file formats and codecs
    av_register_all();

    int err;
    // Init SDL with video support
    err = SDL_Init(SDL_INIT_VIDEO);
    if (err < 0) {
        fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
        return -1;
    }

    // Open video file
    const char* filename = argv[1];
    AVFormatContext* format_context = NULL;
    err = avformat_open_input(&format_context, filename, NULL, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to open input file\n");
        return -1;
    }

    // Retrieve stream information
    err = avformat_find_stream_info(format_context, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to find stream info\n");
        return -1;
    }

    // Dump information about file onto standard error
    av_dump_format(format_context, 0, argv[1], 0);

    // Find the first video stream
    int video_stream;
    for (video_stream = 0; video_stream < format_context->nb_streams; ++video_stream) {
        if (format_context->streams[video_stream]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            break;
        }
    }
    if (video_stream == format_context->nb_streams) {
        fprintf(stderr, "ffmpeg: Unable to find video stream\n");
        return -1;
    }

    AVCodecContext* codec_context = format_context->streams[video_stream]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
    err = avcodec_open2(codec_context, codec, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to open codec\n");
        return -1;
    }

    SDL_Surface* screen = SDL_SetVideoMode(codec_context->width, codec_context->height, 0, 0);
    if (screen == NULL) {
        fprintf(stderr, "Couldn't set video mode\n");
        return -1;
    }

    SDL_Overlay* bmp = SDL_CreateYUVOverlay(codec_context->width, codec_context->height,
                                            SDL_YV12_OVERLAY, screen);

    struct SwsContext* img_convert_context;
    img_convert_context = sws_getCachedContext(NULL,
                                                codec_context->width, codec_context->height,
                                                codec_context->pix_fmt,
                                                codec_context->width, codec_context->height,
                                                PIX_FMT_YUV420P, SWS_BICUBIC,
                                                NULL, NULL, NULL);
    if (img_convert_context == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        return -1;
    }


    AVFrame* frame = avcodec_alloc_frame();
    AVPacket packet;
    AVPacket packet_copy;

    // preparing output ...
    int i, ret;
    char * outputfile = "test.mpg";
    AVFormatContext * oformat_context = NULL;
    AVOutputFormat *ofmt = NULL;
    avformat_alloc_output_context2(&oformat_context, NULL, NULL, outputfile);
    if (!oformat_context) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        return (-1);
    };
    ofmt = oformat_context->oformat;

    for (i = 0; i < format_context->nb_streams; i++) {
        AVStream *in_stream = format_context->streams[i];
        AVStream *out_stream = avformat_new_stream(oformat_context, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            return (-1);
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            return (-1);
        }
        out_stream->codec->codec_tag = 0;
        if (oformat_context->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(oformat_context, 0, outputfile, 1);
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oformat_context->pb, outputfile, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", outputfile);
            return (-1);
        }
    }
    ret = avformat_write_header(oformat_context, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return (-1);
    };
    AVStream *in_stream, *out_stream;


    while (av_read_frame(format_context, &packet) >= 0) {
        av_copy_packet(&packet_copy, &packet);
        // in_stream  = format_context->streams[packet_copy.stream_index];
        // out_stream = oformat_context->streams[packet_copy.stream_index];

        // packet_copy.pts = av_rescale_q_rnd(packet_copy.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // packet_copy.dts = av_rescale_q_rnd(packet_copy.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // packet_copy.duration = av_rescale_q(packet_copy.duration, in_stream->time_base, out_stream->time_base);
        // packet_copy.pos = -1;
        ret = av_interleaved_write_frame(oformat_context, &packet_copy);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
        };

        if (packet.stream_index == video_stream) {
            // Video stream packet
            int frame_finished;

            avcodec_decode_video2(codec_context, frame, &frame_finished, &packet);

            if (frame_finished) {
                SDL_LockYUVOverlay(bmp);

                // Convert frame to YV12 pixel format for display in SDL overlay

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];  // swapped: YV12 stores the V plane before U
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                sws_scale(img_convert_context,
                            frame->data, frame->linesize,
                            0, codec_context->height,
                            pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                SDL_Rect rect;
                rect.x = 0;
                rect.y = 0;
                rect.w = codec_context->width;
                rect.h = codec_context->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

                printf("%d\n", packet.size);
                fwrite(packet.data, 1, packet.size, f);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        // Handling SDL events there
        SDL_Event event;
        if (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT) {
                break;
            }
        }
    }

    fclose(f);

    av_write_trailer(oformat_context);

    if (oformat_context && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&oformat_context->pb);
    avformat_free_context(oformat_context);

    sws_freeContext(img_convert_context);

    // Free the YUV frame
    av_free(frame);

    // Close the codec
    avcodec_close(codec_context);

    // Close the video file
    avformat_close_input(&format_context);

    // Quit SDL
    SDL_Quit();
    return 0;
}
Example #6
void Muxer::MuxerThread() {
	try {

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread started."));

		double total_time = 0.0;

		// start muxing
		for( ; ; ) {

			// get a packet from a stream that isn't done yet
			std::unique_ptr<AVPacketWrapper> packet;
			unsigned int current_stream = 0, streams_done = 0;
			for(unsigned int i = 0; i < m_format_context->nb_streams; ++i) {
				StreamLock lock(&m_stream_data[i]);
				if(lock->m_packet_queue.empty()) {
					if(lock->m_is_done)
						++streams_done;
				} else {
					current_stream = i;
					packet = std::move(lock->m_packet_queue.front());
					lock->m_packet_queue.pop_front();
					break;
				}
			}

			// if all streams are done, we can stop
			if(streams_done == m_format_context->nb_streams) {
				break;
			}

			// if there is no packet, wait and try again later
			if(packet == NULL) {
				usleep(20000);
				continue;
			}

			// try to figure out the time (the exact value is not critical, it's only used for bitrate statistics)
			AVStream *stream = m_format_context->streams[current_stream];
			double packet_time = 0.0;
			if(packet->GetPacket()->dts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->dts * ToDouble(stream->codec->time_base);
			else if(packet->GetPacket()->pts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->pts * ToDouble(stream->codec->time_base);
			if(packet_time > total_time)
				total_time = packet_time;

			// prepare packet
			packet->GetPacket()->stream_index = current_stream;
#if SSR_USE_AV_PACKET_RESCALE_TS
			av_packet_rescale_ts(packet->GetPacket(), stream->codec->time_base, stream->time_base);
#else
			if(packet->GetPacket()->pts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->pts = av_rescale_q(packet->GetPacket()->pts, stream->codec->time_base, stream->time_base);
			}
			if(packet->GetPacket()->dts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->dts = av_rescale_q(packet->GetPacket()->dts, stream->codec->time_base, stream->time_base);
			}
#endif

			// write the packet (again, why does libav/ffmpeg call this a frame?)
			if(av_interleaved_write_frame(m_format_context, packet->GetPacket()) != 0) {
				Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Error: Can't write frame to muxer!"));
				throw LibavException();
			}

			// the data is now owned by libav/ffmpeg, so don't free it
			packet->SetFreeOnDestruct(false);

			// update the byte counter
			{
				SharedLock lock(&m_shared_data);
				lock->m_total_bytes = m_format_context->pb->pos + (m_format_context->pb->buf_ptr - m_format_context->pb->buffer);
				if(lock->m_stats_previous_time == NOPTS_DOUBLE) {
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
				double timedelta = total_time - lock->m_stats_previous_time;
				if(timedelta > 0.999999) {
					lock->m_stats_actual_bit_rate = (double) ((lock->m_total_bytes - lock->m_stats_previous_bytes) * 8) / timedelta;
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
			}

		}

		// tell the others that we're done
		m_is_done = true;

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread stopped."));

	} catch(const std::exception& e) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Exception '%1' in muxer thread.").arg(e.what()));
	} catch(...) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Unknown exception in muxer thread."));
	}
}
Example #7
int H264BS2Video::writeFrame(void)
{
    AVPacket avpkt = { 0 };
    int idx = 0;

#if 0
    char* filename = "../ffmpeg_log.txt";
    FILE* fp = NULL;
    char buffer[128] = {0};

    fp = fopen(filename, "w");
    if (fp == NULL)
    {
        printf("open file %s failed.\n", filename);
        return -1;
    }
#endif

    av_init_packet(&avpkt);

    // av_read_frame returns the next frame; it returns < 0 on error or at end of file
    while (av_read_frame(m_infctx, &avpkt) >= 0)
    {
        // handle packets from the video stream
        if (avpkt.stream_index == m_videoidx)
        {
            //debug("write %d, size: %d\n", idx++, avpkt.size);

            if (avpkt.pts == AV_NOPTS_VALUE)
            {
                // compute PTS/DTS for packets that lack timestamps
                AVRational time_base = m_instream->time_base;
                int64_t duration=(int64_t)((double)AV_TIME_BASE/(double)av_q2d(m_instream->r_frame_rate));
                avpkt.pts=(int64_t)((double)(idx*duration)/(double)(av_q2d(time_base)*AV_TIME_BASE));
                avpkt.dts=avpkt.pts;
                avpkt.duration=(int)((double)duration/(double)(av_q2d(time_base)*AV_TIME_BASE));

                // rescale PTS/DTS to the output stream time base
                avpkt.pts = av_rescale_q_rnd(avpkt.pts, m_instream->time_base, m_outstream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
                avpkt.dts = av_rescale_q_rnd(avpkt.dts, m_instream->time_base, m_outstream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
                avpkt.duration = (int)av_rescale_q(avpkt.duration, m_instream->time_base, m_outstream->time_base);

                idx++;
#if 0
                sprintf(buffer, "111 %d pts: %d dts: %d duration: %d\n", idx, avpkt.pts, avpkt.dts, avpkt.duration);
                fwrite(buffer, 1, strlen(buffer), fp);
#endif
            }

            if (av_interleaved_write_frame(m_outfctx, &avpkt) < 0)
            {
                debug("write frame failed.\n");
                break;
            }
        }

        av_packet_unref(&avpkt);

        avpkt.data = NULL;
        avpkt.size = 0;
    }

    //fclose(fp);

    return 0;
}
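
Note: the floating-point PTS arithmetic above can be done exactly with
av_rescale_q(); a sketch using a hypothetical helper (not part of the class):

static int64_t frame_pts(int64_t frame_index, int fps, AVRational stream_tb)
{
    /* one tick per frame in a 1/fps time base, rescaled to the stream */
    AVRational fps_tb = { 1, fps };
    return av_rescale_q(frame_index, fps_tb, stream_tb);
}

/* e.g. avpkt.pts = avpkt.dts = frame_pts(idx, 30, m_outstream->time_base); */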
Example #8
static int add_to_proxy_output_ffmpeg(
    struct proxy_output_ctx * ctx, AVFrame * frame)
{
    int outsize = 0;

    if (!ctx) {
        return 0;
    }

    if (ctx->sws_ctx && frame &&
            (frame->data[0] || frame->data[1] ||
             frame->data[2] || frame->data[3])) {
        sws_scale(ctx->sws_ctx, (const uint8_t * const*) frame->data,
                  frame->linesize, 0, ctx->orig_height,
                  ctx->frame->data, ctx->frame->linesize);
    }

    frame = ctx->sws_ctx ? (frame ? ctx->frame : 0) : frame;

    if (frame) {
        frame->pts = ctx->cfra++;
    }

    outsize = avcodec_encode_video(
                  ctx->c, ctx->video_buffer, ctx->video_buffersize,
                  frame);

    if (outsize < 0) {
        fprintf(stderr, "Error encoding proxy frame %d for '%s'\n",
                ctx->cfra - 1, ctx->of->filename);
        return 0;
    }

    if (outsize != 0) {
        AVPacket packet;
        av_init_packet(&packet);

        if (ctx->c->coded_frame->pts != AV_NOPTS_VALUE) {
            packet.pts = av_rescale_q(ctx->c->coded_frame->pts,
                                      ctx->c->time_base,
                                      ctx->st->time_base);
        }
        if (ctx->c->coded_frame->key_frame)
            packet.flags |= AV_PKT_FLAG_KEY;

        packet.stream_index = ctx->st->index;
        packet.data = ctx->video_buffer;
        packet.size = outsize;

        if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
            fprintf(stderr, "Error writing proxy frame %d "
                    "into '%s'\n", ctx->cfra - 1,
                    ctx->of->filename);
            return 0;
        }

        return 1;
    } else {
        return 0;
    }
}
Example #9
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVBSFContext *bsfs;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;

    for (i = 0; i < tee->nb_slaves; i++) {
        if (!(avf2 = tee->slaves[i].avf))
            continue;

        /* Flush slave if pkt is NULL*/
        if (!pkt) {
            ret = av_interleaved_write_frame(avf2, NULL);
            if (ret < 0) {
                ret = tee_process_slave_failure(avf, i, ret);
                if (!ret_all && ret < 0)
                    ret_all = ret;
            }
            continue;
        }

        s = pkt->stream_index;
        s2 = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;

        memset(&pkt2, 0, sizeof(AVPacket));
        if ((ret = av_packet_ref(&pkt2, pkt)) < 0) {
            /* referencing failed: record the first error and skip this slave */
            if (!ret_all)
                ret_all = ret;
            continue;
        }
        bsfs = tee->slaves[i].bsfs[s2];
        pkt2.stream_index = s2;

        ret = av_bsf_send_packet(bsfs, &pkt2);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR, "Error while sending packet to bitstream filter: %s\n",
                   av_err2str(ret));
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }

        while(1) {
            ret = av_bsf_receive_packet(bsfs, &pkt2);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                break;
            } else if (ret < 0) {
                break;
            }

            av_packet_rescale_ts(&pkt2, bsfs->time_base_out,
                                 avf2->streams[s2]->time_base);
            ret = av_interleaved_write_frame(avf2, &pkt2);
            if (ret < 0)
                break;
        }

        if (ret < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }
    }
    return ret_all;
}
Example #10
/**
 * Write a packet to the muxer
 */
static int
lav_muxer_write_pkt(muxer_t *m, streaming_message_type_t smt, void *data)
{
  int i;
  AVFormatContext *oc;
  AVStream *st;
  AVPacket packet;
  th_pkt_t *pkt = (th_pkt_t*)data;
  lav_muxer_t *lm = (lav_muxer_t*)m;
  int rc = 0;

  assert(smt == SMT_PACKET);

  oc = lm->lm_oc;

  if(!oc->nb_streams) {
    tvhlog(LOG_ERR, "libav", "No streams to mux");
    rc = -1;
    goto ret;
  }

  if(!lm->lm_init) {
    tvhlog(LOG_ERR, "libav", "Muxer not initialized correctly");
    rc = -1;
    goto ret;
  }

  for(i=0; i<oc->nb_streams; i++) {
    st = oc->streams[i];

    if(st->id != pkt->pkt_componentindex)
      continue;

    av_init_packet(&packet);

    if(st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
      pkt = pkt_merge_header(pkt);

    if(lm->lm_h264_filter && st->codec->codec_id == CODEC_ID_H264) {
      if(av_bitstream_filter_filter(lm->lm_h264_filter,
				    st->codec, 
				    NULL, 
				    &packet.data, 
				    &packet.size, 
				    pktbuf_ptr(pkt->pkt_payload), 
				    pktbuf_len(pkt->pkt_payload), 
				    pkt->pkt_frametype < PKT_P_FRAME) < 0) {
	tvhlog(LOG_WARNING, "libav",  "Failed to filter bitstream");
	break;
      }
    } else {
      packet.data = pktbuf_ptr(pkt->pkt_payload);
      packet.size = pktbuf_len(pkt->pkt_payload);
    }

    packet.stream_index = st->index;
 
    packet.pts      = av_rescale_q(pkt->pkt_pts     , mpeg_tc, st->time_base);
    packet.dts      = av_rescale_q(pkt->pkt_dts     , mpeg_tc, st->time_base);
    packet.duration = av_rescale_q(pkt->pkt_duration, mpeg_tc, st->time_base);

    if(pkt->pkt_frametype < PKT_P_FRAME)
      packet.flags |= AV_PKT_FLAG_KEY;

    if((rc = av_interleaved_write_frame(oc, &packet)))
      tvhlog(LOG_WARNING, "libav",  "Failed to write frame");

    // h264_mp4toannexb filter might allocate new data.
    if(packet.data != pktbuf_ptr(pkt->pkt_payload))
      av_free(packet.data);

    break;
  }

 ret:
  lm->m_errors += (rc != 0);
  pkt_ref_dec(pkt);

  return rc;
}
Example #11
bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
{
   AVPacket pkt;
   int nBytesToWrite = 0;
   uint8_t *pRawSamples = NULL;
   int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);
   int ret;

   nBytesToWrite = frameSize;
   pRawSamples  = (uint8_t*)pFrame;
   av_fifo_realloc2(mEncAudioFifo, av_fifo_size(mEncAudioFifo) + frameSize);

   // Put the raw audio samples into the FIFO.
   ret = av_fifo_generic_write(mEncAudioFifo, pRawSamples, nBytesToWrite,NULL);

   wxASSERT(ret == nBytesToWrite);

   if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz) {
      wxMessageBox(wxString::Format(_("FFmpeg : ERROR - nAudioFrameSizeOut too large.")),
                   _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
      return false;
   }

   // Read raw audio samples out of the FIFO in nAudioFrameSizeOut byte-sized groups to encode.
   while ((ret = av_fifo_size(mEncAudioFifo)) >= nAudioFrameSizeOut)
   {
      ret = av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nAudioFrameSizeOut, NULL);

      av_init_packet(&pkt);

      ret = encode_audio(mEncAudioCodecCtx,
         &pkt,                          // out
         (int16_t*)mEncAudioFifoOutBuf, // in
         default_frame_size);
      if (ret < 0)
      {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't encode audio frame.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return false;
      }
      if (ret == 0)
         continue;

      // Rescale from the codec time_base to the AVStream time_base.
      if (pkt.pts != int64_t(AV_NOPTS_VALUE))
         pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      if (pkt.dts != int64_t(AV_NOPTS_VALUE))
         pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      //wxLogDebug(wxT("FFmpeg : (%d) Writing audio frame with PTS: %lld."), mEncAudioCodecCtx->frame_number, (long long) pkt.pts);

      pkt.stream_index = mEncAudioStream->index;

      // Write the encoded audio frame to the output file.
      if ((ret = av_interleaved_write_frame(mEncFormatCtx, &pkt)) < 0)
      {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Failed to write audio frame to file.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return false;
      }
      av_free_packet(&pkt);
   }
   return true;
}
Example #12
bool ExportFFmpeg::Finalize()
{
   int i, nEncodedBytes;

   // Flush the audio FIFO and encoder.
   for (;;)
   {
      AVPacket pkt;
      int nFifoBytes = av_fifo_size(mEncAudioFifo); // any bytes left in audio FIFO?

      av_init_packet(&pkt);

      nEncodedBytes = 0;
      int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);

      if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz || nFifoBytes > mEncAudioFifoOutBufSiz) {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Too much remaining data.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return false;
      }

      // Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because
      // if it did we'd have pulled it from the FIFO during the last encodeAudioFrame() call -
      // the encoder must support short/incomplete frames for this to work.
      if (nFifoBytes > 0)
      {
         // Fill audio buffer with zeroes. If codec tries to read the whole buffer,
         // it will just read silence. If not - who cares?
         memset(mEncAudioFifoOutBuf,0,mEncAudioFifoOutBufSiz);
         const AVCodec *codec = mEncAudioCodecCtx->codec;

         // We have an incomplete buffer of samples left.  Is it OK to encode it?
         // If codec supports CODEC_CAP_SMALL_LAST_FRAME, we can feed it with smaller frame
         // Or if codec is FLAC, feed it anyway (it doesn't have CODEC_CAP_SMALL_LAST_FRAME, but it works)
         // Or if frame_size is 1, then it's some kind of PCM codec, they don't have frames and will be fine with the samples
         // Or if user configured the exporter to pad with silence, then we'll send audio + silence as a frame.
         if ((codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE))
            || mEncAudioCodecCtx->frame_size <= 1
            || gPrefs->Read(wxT("/FileFormats/OverrideSmallLastFrame"), true)
            )
         {
            int frame_size = default_frame_size;

            // The last frame is going to contain a smaller than usual number of samples.
            // For codecs without CODEC_CAP_SMALL_LAST_FRAME use normal frame size
            if (codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE))
               frame_size = nFifoBytes / (mEncAudioCodecCtx->channels * sizeof(int16_t));

            wxLogDebug(wxT("FFmpeg : Audio FIFO still contains %d bytes, writing %d sample frame ..."),
               nFifoBytes, frame_size);

            // Pull the bytes out from the FIFO and feed them to the encoder.
            if (av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nFifoBytes, NULL) == 0)
            {
               nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, (int16_t*)mEncAudioFifoOutBuf, frame_size);
            }
         }
      }

      // Now flush the encoder.
      if (nEncodedBytes <= 0)
         nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, NULL, 0);

      if (nEncodedBytes <= 0)
         break;

      pkt.stream_index = mEncAudioStream->index;

      // Set presentation time of frame (currently in the codec's timebase) in the stream timebase.
      if(pkt.pts != int64_t(AV_NOPTS_VALUE))
         pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      if(pkt.dts != int64_t(AV_NOPTS_VALUE))
         pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);

      if (av_interleaved_write_frame(mEncFormatCtx, &pkt) != 0)
      {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Couldn't write last audio frame to output file.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         break;
      }
      av_free_packet(&pkt);
   }

   // Write any file trailers.
   av_write_trailer(mEncFormatCtx);

   // Close the codecs.
   if (mEncAudioStream != NULL)
      avcodec_close(mEncAudioStream->codec);

   for (i = 0; i < (int)mEncFormatCtx->nb_streams; i++)
   {
      av_freep(&mEncFormatCtx->streams[i]->codec);
      av_freep(&mEncFormatCtx->streams[i]);
   }

   // Close the output file if we created it.
   if (!(mEncFormatDesc->flags & AVFMT_NOFILE))
      ufile_close(mEncFormatCtx->pb);

   // Free any buffers or structures we allocated.
   av_free(mEncFormatCtx);

   av_freep(&mEncAudioFifoOutBuf);
   mEncAudioFifoOutBufSiz = 0;

   av_fifo_free(mEncAudioFifo);

   mEncAudioFifo = NULL;

   return true;
}
Example #13
int main(void)
{

	int frame = 0, ret = 0, got_picture = 0, frameFinished = 0, videoStream = 0, check_yuv = 0;
	int frame_size = 0, bitrate = 0;
	int streamIdx = 0;
	unsigned i = 0;
	enum AVMediaType mediaType;
	struct SwsContext *sws_ctx = NULL;
	AVStream *video_st = NULL;
	AVCodecContext    *pCodecCtx = NULL, *ctxEncode = NULL;
	AVFrame           *pFrame = NULL;
	AVPacket          input_pkt, output_pkt;

	check_yuv = check_file();

	// Register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	init_parameter(&input_pkt, &output_pkt); //init parameter function
	pictureEncoded_init();

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(inFmtCtx->streams[streamIdx]->codec->width,
	                         inFmtCtx->streams[streamIdx]->codec->height,
	                         inFmtCtx->streams[streamIdx]->codec->pix_fmt,
	                         clip_width, clip_height,
	                         PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

	while (av_read_frame(inFmtCtx, &input_pkt) >= 0) {

		streamIdx = input_pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;

		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		pFrame = av_frame_alloc();

		if (!pFrame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}

		av_packet_rescale_ts(&input_pkt, inFmtCtx->streams[videoStream]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);


		if (mediaType == AVMEDIA_TYPE_VIDEO){


			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, pFrame, &frameFinished, &input_pkt);       // Decode video frame (input_pkt-> pFrame)

			if (ret < 0)
			{
				av_frame_free(&pFrame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
			 

			if (frameFinished){

				frame_num++;

				sws_scale(sws_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize, 0, clip_height, pictureEncoded->data, pictureEncoded->linesize);

				pictureEncoded->pts = av_frame_get_best_effort_timestamp(pFrame);

				//pictureEncoded-> output_pkt
				//avcodec_encode_video2(ctxEncode, &output_pkt, pictureEncoded, &got_picture);
				avcodec_encode_video2(ofmt_ctx->streams[streamIdx]->codec, &output_pkt, pictureEncoded, &got_picture);

				av_frame_free(&pFrame);

				//if the function is working
				if (got_picture){

					//printf("Encoding %d \n", frame_use);

					frame_use++;


					av_packet_rescale_ts(&output_pkt, ofmt_ctx->streams[streamIdx]->codec->time_base, ofmt_ctx->streams[streamIdx]->time_base);

					//av_packet_rescale_ts(&output_pkt, ctxEncode->time_base, video_st->time_base);

					ret = av_interleaved_write_frame(ofmt_ctx, &output_pkt);

					if (ret < 0) {
						fprintf(stderr, "Error muxing packet\n");
						break;
					}
				}
			}

			av_free_packet(&input_pkt);
			av_free_packet(&output_pkt);

		}

	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {

			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}

	printf("\n\n total frame_num : %d , frame_encode:  %d \n", frame_num - 1, frame_use - 1);


	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */
	av_write_trailer(ofmt_ctx);

	// Free the YUV frame
	av_frame_free(&pFrame);
	av_frame_free(&pictureEncoded);

	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		avcodec_close(inFmtCtx->streams[i]->codec);
		if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
			avcodec_close(ofmt_ctx->streams[i]->codec);
	}

	avformat_close_input(&inFmtCtx);

	if (ofmt_ctx&& !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred \n");

	system("pause");
	return 0;
}
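
Note: flush_encoder() is not shown in this example; with the same old API it
would look roughly like this (a sketch assuming the example's global
ofmt_ctx; encoding with a NULL frame drains buffered pictures):

static int flush_encoder(unsigned int stream_index)
{
	int ret = 0, got_picture = 1;
	AVPacket pkt = { 0 };
	while (got_picture) {
		av_init_packet(&pkt);
		pkt.data = NULL;
		pkt.size = 0;
		ret = avcodec_encode_video2(ofmt_ctx->streams[stream_index]->codec,
		                            &pkt, NULL, &got_picture);
		if (ret < 0)
			break;
		if (got_picture) {
			av_packet_rescale_ts(&pkt,
			                     ofmt_ctx->streams[stream_index]->codec->time_base,
			                     ofmt_ctx->streams[stream_index]->time_base);
			ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
			if (ret < 0)
				break;
		}
	}
	av_free_packet(&pkt);
	return ret;
}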
Example #14
int ConvertH264toTS(int fileNumber)
{
	AVFormatContext* inputFormatContext = NULL;
	AVFormatContext* outputFormatContext = NULL;
	AVStream* inStream = NULL;
	AVStream* outStream = NULL;
	AVOutputFormat* outFormat = NULL;
	AVCodec* outCodec = NULL;
	AVPacket pkt;
	AVPacket outpkt;
	char inputFile[200], outputFile[200];
	unsigned int i;
	int inStreamIndex = -1; // -1 means "no video stream found yet"
	int fps, pts = 0, last_pts = 0;
	int64_t inputEndTime;

	// initialize ffmpeg libraries
	av_register_all();
	
	// Open input file
	sprintf(inputFile, "VIDEO%d.h264", fileNumber);
	if(avformat_open_input(&inputFormatContext, inputFile, NULL, NULL) != 0)
	{
		printf("\nopen %s file error!!!!!!!\n", inputFile);
		return -1;
	}

	// Find input file's stream info
	if((avformat_find_stream_info(inputFormatContext, NULL)) < 0)
	{
		printf("\nfind stream info error!!!!!!!\n");
		return -1;
	}
	else
	{
		// printf("found inputfile's stream info\n");
	}

	// Dump information about the input file onto stderr
	av_dump_format(inputFormatContext, 0, inputFile, 0);

	for(i = 0; i < inputFormatContext->nb_streams; i++)
	{
		if(inputFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			inStreamIndex = i;
            inStream = inputFormatContext->streams[i];
            // printf("found stream!!!! inStreamIndex : %d\n", inStreamIndex);

            break;
		}
	}
	
	// if there is no duration info of inputFile(h264), deduce it from time_base
	if(inputFormatContext->duration == AV_NOPTS_VALUE)
	{
        if(inStreamIndex != -1 && inputFormatContext->streams[inStreamIndex])
        {	
            if(inputFormatContext->streams[inStreamIndex]->duration != AV_NOPTS_VALUE)
            {
                inputEndTime = (inputFormatContext->streams[inStreamIndex]->duration)/(inputFormatContext->streams[inStreamIndex]->time_base.den/inputFormatContext->streams[inStreamIndex]->time_base.num);
            }
        }
    }
    else
        inputEndTime = (inputFormatContext->duration)/(AV_TIME_BASE);

    // calculate frames per second (fps); the code below doesn't work reliably
    // because a raw h264 file carries no container information such as fps or duration,
    // so fps is set to 30 here to match the h264 input file.
    // if(inStreamIndex != -1 && inputFormatContext->streams[inStreamIndex])
    // {
    //     if(inputFormatContext->streams[inStreamIndex]->r_frame_rate.num != AV_NOPTS_VALUE && inputFormatContext->streams[inStreamIndex]->r_frame_rate.den != 0)
    //     {
    //         fps =  (inputFormatContext->streams[inStreamIndex]->r_frame_rate.num)/ (inputFormatContext->streams[inStreamIndex]->r_frame_rate.den);
    //     }
    // }
    // else
    // {
    //     fps = 30;
    // }
    fps = 30;

	// Create outputFile and allocate output format
	sprintf(outputFile, "VIDEO%d.ts", fileNumber);
	outFormat = av_guess_format(NULL, outputFile, NULL);
	if(outFormat == NULL) // ts format doesn't exist
	{
		printf("output file format doesn't exist");
		return -1;
	}
	else // ts format exists
	{
		outputFormatContext = avformat_alloc_context();
		if(outputFormatContext != NULL)
		{
			outputFormatContext->oformat = outFormat;
			snprintf(outputFormatContext->filename, sizeof(outputFormatContext->filename), "%s", outputFile); // ?????
		}
		else
		{
			printf("outputFormatContext allocation error");
			return -1;
		}
	}

	// Add video stream to output format
	if((outFormat->video_codec != 0) && (inStream != NULL))
	{
		outCodec = avcodec_find_encoder(outFormat->video_codec);
		if(outCodec == NULL)
		{
			printf("could not find vid encoder");
			return -1;
		}
		else
		{
			printf("found out vid encoder : %s\n", outCodec->name);
			outStream = avformat_new_stream(outputFormatContext, outCodec);
            if(NULL == outStream)
            {
            	printf("failed to allocated output vid strm");
            	return -1;
            }
            else
            {	 // avcodec_copy_context() return 0 when ok
				if(avcodec_copy_context(outStream->codec, inputFormatContext->streams[inStreamIndex]->codec) != 0)
				{
					printf("Failed to copy context");
					return -1;
				}
				else
				{
					// time_base is used to calculate when to decode and show the frame
					outStream->sample_aspect_ratio.den = outStream->codec->sample_aspect_ratio.den;
                    outStream->sample_aspect_ratio.num = inStream->codec->sample_aspect_ratio.num;
                    outStream->codec->codec_id = inStream->codec->codec_id;
                    outStream->codec->time_base.num = 2;
                    outStream->codec->time_base.den = fps * (inStream->codec->ticks_per_frame);
                    outStream->time_base.num = 1;
                    outStream->time_base.den = 1000;
                    outStream->r_frame_rate.num = fps;
                    outStream->r_frame_rate.den = 1;
                    outStream->avg_frame_rate.num = fps;
                    outStream->avg_frame_rate.den = 1;
				}
			}
		}
	}
	else
		printf("stream context outputting fail !!!!!!!!!!!!!!!!\n");

	// in avformat.h, #define AVFMT_NOFILE 0x0001.
	// Demuxer will use avio_open, no opened file should be provided by the caller.
	if(!(outFormat->flags & AVFMT_NOFILE))
	{
		if (avio_open2(&outputFormatContext->pb, outputFile, AVIO_FLAG_WRITE, NULL, NULL) < 0) 
		{
			printf("Could Not Open File ");
			return -1;
		}
		// else
		// 	printf("avio_open2 success!!!\n");
	}

    // Write the stream header, if any.
	if (avformat_write_header(outputFormatContext, NULL) < 0)
	{
		printf("Error Occurred While Writing Header ");
		return -1;
	}
	// printf("Written Output header");

	// Now in while loop read frame using av_read_frame and write to output format using 
	// av_interleaved_write_frame(). You can use following loop
	// while(av_read_frame(inputFormatContext, &pkt) >= 0 && (m_num_frames-- > 0))
	while(av_read_frame(inputFormatContext, &pkt) >= 0)
	{
		if(pkt.stream_index == inStreamIndex)
		{
			// av_rescale_q(pkt.pts, inStream->time_base, inStream->codec->time_base);
			// av_rescale_q(pkt.dts, inStream->time_base, inStream->codec->time_base);
			
			av_init_packet(&outpkt);
			
			if(pkt.pts != AV_NOPTS_VALUE) // AV_NOPTS_VALUE means undefined timestamp value
			{
				if(last_pts == pts)
				{
					pts++;
					last_pts = pts;
				}
				
				outpkt.pts = pts;   
			}
			else // pkt.pts is undefined
				outpkt.pts = AV_NOPTS_VALUE;

			if(pkt.dts == AV_NOPTS_VALUE) // if pkt's dts value is undefined
				outpkt.dts = AV_NOPTS_VALUE;
			// if pkt's dts value is defined with a value
			else
				outpkt.dts = pts;

			outpkt.data = pkt.data;
			outpkt.size = pkt.size;
			outpkt.stream_index = pkt.stream_index;
			outpkt.flags |= AV_PKT_FLAG_KEY; // #define AV_PKT_FLAG_KEY 0x0001, which means the packet contains a keyframe
			last_pts = pts;

			if(av_interleaved_write_frame(outputFormatContext, &outpkt) < 0)
				printf("failed video write\n");
			else
			{
				// printf("video write ok!!!!\n");
				outStream->codec->frame_number++;
			}
			
			av_free_packet(&outpkt);
			av_free_packet(&pkt);
		}
		else
			av_free_packet(&pkt); // also release packets from non-video streams
	}

	// Finally write trailer and clean up everything
	av_write_trailer(outputFormatContext);
	
	// free the memory
	if(inStream && inStream->codec)
		avcodec_close(inStream->codec);
	if(inputFormatContext)
		avformat_close_input(&inputFormatContext);
	if(outStream && outStream->codec)
		avcodec_close(outStream->codec);
	if(outputFormatContext)
	{
		// the output context was opened with avio_open2(), so close its pb and
		// free the context; avformat_close_input() is only for demuxer contexts
		if(!(outFormat->flags & AVFMT_NOFILE))
			avio_close(outputFormatContext->pb);
		avformat_free_context(outputFormatContext);
		outputFormatContext = NULL;
	}
	
	return 0;
}
Example #15
int AVFormatWriter::WriteVideoFrame(VideoFrame *frame)
{
    //AVCodecContext *c = m_videoStream->codec;

    uint8_t *planes[3];
    unsigned char *buf = frame->buf;
    int framesEncoded = m_framesWritten + m_bufferedVideoFrameTimes.size();

    planes[0] = buf;
    planes[1] = planes[0] + frame->width * frame->height;
    planes[2] = planes[1] + (frame->width * frame->height) /
        4; // (pictureFormat == PIX_FMT_YUV422P ? 2 : 4);

    av_frame_unref(m_picture);
    m_picture->data[0] = planes[0];
    m_picture->data[1] = planes[1];
    m_picture->data[2] = planes[2];
    m_picture->linesize[0] = frame->width;
    m_picture->linesize[1] = frame->width / 2;
    m_picture->linesize[2] = frame->width / 2;
    m_picture->pts = framesEncoded + 1;
    m_picture->type = FF_BUFFER_TYPE_SHARED;

    if ((framesEncoded % m_keyFrameDist) == 0)
        m_picture->pict_type = AV_PICTURE_TYPE_I;
    else
        m_picture->pict_type = AV_PICTURE_TYPE_NONE;

    int got_pkt = 0;
    int ret = 0;

    m_bufferedVideoFrameTimes.push_back(frame->timecode);
    m_bufferedVideoFrameTypes.push_back(m_picture->pict_type);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    {
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_video2(m_videoStream->codec, &pkt,
                                    m_picture, &got_pkt);
    }

    if (ret < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "avcodec_encode_video2() failed");
        return ret;
    }

    if (!got_pkt)
    {
        //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4, pt: %5").arg(pkt.size).arg(m_framesWritten).arg(frame->timecode).arg(frame->frameNumber).arg(m_picture->pict_type));
        return ret;
    }

    long long tc = frame->timecode;

    if (!m_bufferedVideoFrameTimes.isEmpty())
        tc = m_bufferedVideoFrameTimes.takeFirst();
    if (!m_bufferedVideoFrameTypes.isEmpty())
    {
        int pict_type = m_bufferedVideoFrameTypes.takeFirst();
        if (pict_type == AV_PICTURE_TYPE_I)
            pkt.flags |= AV_PKT_FLAG_KEY;
    }

    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    pkt.pts = tc * m_videoStream->time_base.den / m_videoStream->time_base.num / 1000;
    pkt.dts = AV_NOPTS_VALUE;
    pkt.stream_index= m_videoStream->index;

    //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, pic->pts: %6, f->tc: %7, pt: %8").arg(pkt.size).arg(m_framesWritten).arg(pkt.pts).arg(tc).arg(frame->frameNumber).arg(m_picture->pts).arg(frame->timecode).arg(m_picture->pict_type));
    ret = av_interleaved_write_frame(m_ctx, &pkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteVideoFrame(): "
                "av_interleaved_write_frame couldn't write Video");

    frame->timecode = tc + m_startingTimecodeOffset;
    m_framesWritten++;

    av_free_packet(&pkt);

    return 1;
}
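
Note: since tc is in milliseconds here, the manual pkt.pts arithmetic is
equivalent to rescaling from a 1/1000 time base (a sketch):

AVRational ms = { 1, 1000 };
pkt.pts = av_rescale_q(tc, ms, m_videoStream->time_base);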
Example #16
void ExternalOutput::writeVideoData(char* buf, int len) {
    RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);

    uint16_t currentVideoSeqNumber = head->getSeqNumber();
    if (currentVideoSeqNumber != lastVideoSequenceNumber_ + 1) {
        // Something screwy.  We should always see sequence numbers incrementing monotonically.
        ELOG_DEBUG("Unexpected video sequence number; current %d, previous %d",
                  currentVideoSeqNumber, lastVideoSequenceNumber_);
        // Set our search state to look for the start of a frame, and discard what we currently have (if anything).
        // it's now worthless.
        vp8SearchState_ = lookingForStart;
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
    }

    lastVideoSequenceNumber_ = currentVideoSeqNumber;

    if (first_video_timestamp_ == -1) {
        first_video_timestamp_ = head->getTimestamp();
    }

    // TODO(pedro) we should be tearing off RTP padding here, if it exists.  But WebRTC currently does not use padding.

    RtpVP8Parser parser;
    erizo::RTPPayloadVP8* payload = parser.parseVP8(reinterpret_cast<unsigned char*>(buf + head->getHeaderLength()),
                                                    len - head->getHeaderLength());

    bool endOfFrame = (head->getMarker() > 0);
    bool startOfFrame = payload->beginningOfPartition;

    bool deliver = false;
    switch (vp8SearchState_) {
    case lookingForStart:
      if (startOfFrame && endOfFrame) {
        // This packet is a standalone frame.  Send it on.  Look for start.
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
          deliver = true;
        }
      } else if (!startOfFrame && !endOfFrame) {
        // This is neither the start nor the end of a frame.  Reset our buffers.  Look for start.
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
      } else if (startOfFrame && !endOfFrame) {
        // Found start frame.  Copy to buffers.  Look for our end.
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
          vp8SearchState_ = lookingForEnd;
        }
      } else {  // (!startOfFrame && endOfFrame)
        // We got the end of a frame.  Reset our buffers.
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
      }
      break;
    case lookingForEnd:
      if (startOfFrame && endOfFrame) {
        // Unexpected.  We were looking for the end of a frame, and got a whole new frame.
        // Reset our buffers, send this frame on, and go to the looking for start state.
        vp8SearchState_ = lookingForStart;
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
          deliver = true;
        }
      } else if (!startOfFrame && !endOfFrame) {
        // This is neither the start nor the end.  Add it to our unpackage buffer.
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
        }
      } else if (startOfFrame && !endOfFrame) {
        // Unexpected.  We got the start of a frame.  Clear out our buffer, toss this payload in,
        // and continue looking for the end.
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
        }
      } else {  // (!startOfFrame && endOfFrame)
        // Got the end of a frame.  Let's deliver and start looking for the start of a frame.
        vp8SearchState_ = lookingForStart;
        if (bufferCheck(payload)) {
          memcpy(unpackagedBufferpart_, payload->data, payload->dataLength);
          unpackagedSize_ += payload->dataLength;
          unpackagedBufferpart_ += payload->dataLength;
          deliver = true;
        }
      }
      break;
    }

    delete payload;

    this->initContext();
    if (video_stream_ == NULL) {
      // could not init our context yet.
      return;
    }

    if (deliver) {
      unpackagedBufferpart_ -= unpackagedSize_;

      long long currentTimestamp = head->getTimestamp();  // NOLINT
      if (currentTimestamp - first_video_timestamp_ < 0) {
        // we wrapped; add 2^32 to correct this.
        // We only handle a single wrap around since that's ~13 hours of recording, minimum.
        currentTimestamp += 0x100000000LL;
      }

      // All of our video offerings are using a 90khz clock.
      long long timestampToWrite = (currentTimestamp - first_video_timestamp_) /  // NOLINT
                                                (90000 / video_stream_->time_base.den);

      // Adjust for our start time offset

      // in practice, our timebase den is 1000, so this operation is a no-op.
      timestampToWrite += video_offset_ms_ / (1000 / video_stream_->time_base.den);

      AVPacket avpkt;
      av_init_packet(&avpkt);
      avpkt.data = unpackagedBufferpart_;
      avpkt.size = unpackagedSize_;
      avpkt.pts = timestampToWrite;
      avpkt.stream_index = 0;
      av_interleaved_write_frame(context_, &avpkt);   // takes ownership of the packet
      unpackagedSize_ = 0;
      unpackagedBufferpart_ = unpackagedBuffer_;
    }
}
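The wrap correction above handles a single 32-bit rollover of the RTP timestamp, which the comment notes covers roughly 13 hours. A hypothetical helper (not part of the original class) that unwraps any number of rollovers by tracking the previous extended value:

#include <stdint.h>

/* Extend a 32-bit RTP timestamp to 64 bits.  'last' holds the previous
 * extended value (initialize it with the first timestamp).  A jump of
 * more than half the 32-bit range is treated as a wrap in either
 * direction.  Hypothetical sketch, not from the original code. */
static int64_t unwrap_rtp_ts(uint32_t ts, int64_t *last)
{
    int64_t v = (*last & ~0xFFFFFFFFLL) | (int64_t)ts;
    if (v < *last - 0x80000000LL)
        v += 0x100000000LL;   /* wrapped forward */
    else if (v > *last + 0x80000000LL)
        v -= 0x100000000LL;   /* reordered packet from before a wrap */
    *last = v;
    return v;
}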
Пример #17
0
int AVFormatWriter::WriteAudioFrame(unsigned char *buf, int fnum, long long &timecode)
{
#if HAVE_BIGENDIAN
    bswap_16_buf((short int*) buf, m_audioFrameSize, m_audioChannels);
#endif

    int got_packet = 0;
    int ret = 0;
    int samples_per_avframe  = m_audioFrameSize * m_audioChannels;
    int sampleSizeIn   = AudioOutputSettings::SampleSize(FORMAT_S16);
    AudioFormat format =
        AudioOutputSettings::AVSampleFormatToFormat(m_audioStream->codec->sample_fmt);
    int sampleSizeOut  = AudioOutputSettings::SampleSize(format);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data          = NULL;
    pkt.size          = 0;

    if (av_get_packed_sample_fmt(m_audioStream->codec->sample_fmt) == AV_SAMPLE_FMT_FLT)
    {
        AudioOutputUtil::toFloat(FORMAT_S16, (void *)m_audioInBuf, (void *)buf,
                                 samples_per_avframe * sampleSizeIn);
        buf = m_audioInBuf;
    }
    if (av_sample_fmt_is_planar(m_audioStream->codec->sample_fmt))
    {
        AudioOutputUtil::DeinterleaveSamples(format,
                                             m_audioChannels,
                                             m_audioInPBuf,
                                             buf,
                                             samples_per_avframe * sampleSizeOut);

        // init AVFrame for planar data (input is interleaved)
        for (int j = 0, jj = 0; j < m_audioChannels; j++, jj += m_audioFrameSize)
        {
            m_audPicture->data[j] = (uint8_t*)(m_audioInPBuf + jj * sampleSizeOut);
        }
    }
    else
    {
        m_audPicture->data[0] = buf;
    }

    m_audPicture->linesize[0] = m_audioFrameSize;
    m_audPicture->nb_samples = m_audioFrameSize;
    m_audPicture->format = m_audioStream->codec->sample_fmt;
    m_audPicture->extended_data = m_audPicture->data;

    m_bufferedAudioFrameTimes.push_back(timecode);

    {
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_audio2(m_audioStream->codec, &pkt,
                                    m_audPicture, &got_packet);
    }

    if (ret < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "avcodec_encode_audio2() failed");
        return ret;
    }

    if (!got_packet)
    {
        //LOG(VB_RECORD, LOG_ERR, QString("WriteAudioFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4").arg(m_audPkt->size).arg(m_framesWritten).arg(timecode).arg(fnum));
        return ret;
    }

    long long tc = timecode;

    if (m_bufferedAudioFrameTimes.size())
        tc = m_bufferedAudioFrameTimes.takeFirst();

    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    if (m_avVideoCodec)
        pkt.pts = tc * m_videoStream->time_base.den / m_videoStream->time_base.num / 1000;
    else
        pkt.pts = tc * m_audioStream->time_base.den / m_audioStream->time_base.num / 1000;

    pkt.dts = AV_NOPTS_VALUE;
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index = m_audioStream->index;

    //LOG(VB_RECORD, LOG_ERR, QString("WriteAudioFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, f->tc: %6").arg(m_audPkt->size).arg(m_framesWritten).arg(m_audPkt->pts).arg(tc).arg(fnum).arg(timecode));

    ret = av_interleaved_write_frame(m_ctx, &pkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteAudioFrame(): "
                "av_interleaved_write_frame couldn't write Audio");
    timecode = tc + m_startingTimecodeOffset;

    av_free_packet(&pkt);

    return 1;
}
Пример #18
0
int main(int argc, char **argv)
{
    double prev_segment_time = 0;
    unsigned int output_index = 1;
    AVInputFormat *ifmt;
    AVOutputFormat *ofmt;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVCodec *codec;
    char *output_filename;
    char *remove_filename;
    int video_index = -1;
    int audio_index = -1;
    int last_chunk = 0;
    int resume = 0;
    unsigned int first_segment = 1;
    unsigned int last_segment = 0;
    int write_index = 1;
    int decode_done;
    char *dot;
    int ret;
    unsigned int i;
    int remove_file;
    struct sigaction act;

    int opt;
    int longindex;
    char *endptr;
    struct options_t options;

    static const char *optstring = "i:d:p:m:u:r::n:ovh?";

    static const struct option longopts[] = {
        { "input",         required_argument, NULL, 'i' },
        { "duration",      required_argument, NULL, 'd' },
        { "output-prefix", required_argument, NULL, 'p' },
        { "m3u8-file",     required_argument, NULL, 'm' },
        { "url-prefix",    required_argument, NULL, 'u' },
        { "resume",        optional_argument, NULL, 'r' },
        { "num-segments",  required_argument, NULL, 'n' },
        { "help",          no_argument,       NULL, 'h' },
        { 0, 0, 0, 0 }
    };


    memset(&options, 0 ,sizeof(options));

    /* Set some defaults */
    options.segment_duration = 10;
    options.num_segments = 0;

    do {
        opt = getopt_long(argc, argv, optstring, longopts, &longindex );
        switch (opt) {
            case 'i':
                options.input_file = optarg;
                if (!strcmp(options.input_file, "-")) {
                    options.input_file = "pipe:";
                }
                break;

            case 'd':
                options.segment_duration = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.segment_duration < 0 || options.segment_duration == -LONG_MAX) {
                    fprintf(stderr, "Segment duration time (%s) invalid\n", optarg);
                    exit(1);
                }
                break;

            case 'p':
                options.output_prefix = optarg;
                break;

            case 'm':
                options.m3u8_file = optarg;
                break;

            case 'u':
                options.url_prefix = optarg;
                break;

            case 'r':
                if (optarg && strtol(optarg, &endptr, 10)) {
                    resume = strtol(optarg, &endptr, 10);
                } else {
                    resume = -1;
                }
                break;

            case 'n':
                options.num_segments = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.num_segments < 0 || options.num_segments >= LONG_MAX) {
                    fprintf(stderr, "Maximum number of ts files (%s) invalid\n", optarg);
                    exit(1);
                }
                break;

            case 'h':
                display_usage();
                break;
        }
    } while (opt != -1);


    /* Check required args were set */
    if (options.input_file == NULL) {
        fprintf(stderr, "Please specify an input file.\n");
        exit(1);
    }

    if (options.output_prefix == NULL) {
        fprintf(stderr, "Please specify an output prefix.\n");
        exit(1);
    }

    if (options.m3u8_file == NULL) {
        fprintf(stderr, "Please specify an m3u8 output file.\n");
        exit(1);
    }

    if (options.url_prefix == NULL) {
        fprintf(stderr, "Please specify a url prefix.\n");
        exit(1);
    }

    av_register_all();
    remove_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!remove_filename) {
        fprintf(stderr, "Could not allocate space for remove filenames\n");
        exit(1);
    }

    output_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        exit(1);
    }

    options.tmp_m3u8_file = malloc(strlen(options.m3u8_file) + 2);
    if (!options.tmp_m3u8_file) {
        fprintf(stderr, "Could not allocate space for temporary index filename\n");
        exit(1);
    }

    //check if we want to continue an existing stream
    if (resume != 0) {
        if (resume == -1) {
            last_chunk = get_last_chunk(options.output_prefix);
        } else {
            last_chunk = resume;
        }

        last_segment = last_chunk - 1;
        output_index = last_chunk;
    }

    // Use a dotfile as a temporary file
    strncpy(options.tmp_m3u8_file, options.m3u8_file, strlen(options.m3u8_file) + 2);
    dot = strrchr(options.tmp_m3u8_file, '/');
    dot = dot ? dot + 1 : options.tmp_m3u8_file;
    memmove(dot + 1, dot, strlen(dot));
    *dot = '.';

    ifmt = av_find_input_format("mpegts");
    if (!ifmt) {
        fprintf(stderr, "Could not find MPEG-TS demuxer\n");
        exit(1);
    }

    ret = avformat_open_input(&ic, options.input_file, ifmt, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret);
        exit(1);
    }

    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        exit(1);
    }

    ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        exit(1);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocated output context");
        exit(1);
    }
    oc->oformat = ofmt;

    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                video_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                video_st = add_output_stream(oc, ic->streams[i]);
                break;
            case AVMEDIA_TYPE_AUDIO:
                audio_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                audio_st = add_output_stream(oc, ic->streams[i]);
                break;
            default:
                ic->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    // Don't print warnings when PTS and DTS are identical.
    ic->flags |= AVFMT_FLAG_IGNDTS;

    av_dump_format(oc, 0, options.output_prefix, 1);

    if (video_st) {
      codec = avcodec_find_decoder(video_st->codec->codec_id);
      if (!codec) {
          fprintf(stderr, "Could not find video decoder %x, key frames will not be honored\n", video_st->codec->codec_id);
      }

      if (avcodec_open2(video_st->codec, codec, NULL) < 0) {
          fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
      }
    }


    snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, output_index++);
    if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        exit(1);
    }

    if (avformat_write_header(oc, NULL)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        exit(1);
    }

    write_index = !write_index_file(options, first_segment, last_segment, 0);

    /* Setup signals */
    memset(&act, 0, sizeof(act));
    act.sa_handler = &handler;

    sigaction(SIGINT, &act, NULL);
    sigaction(SIGTERM, &act, NULL);

    do {
        double segment_time = prev_segment_time;
        AVPacket packet;

        if (terminate) {
          break;
        }

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }

        if (av_dup_packet(&packet) < 0) {
            fprintf(stderr, "Could not duplicate packet");
            av_free_packet(&packet);
            break;
        }

        // Use video stream as time base and split at keyframes. Otherwise use audio stream
        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) {
            segment_time = packet.pts * av_q2d(video_st->time_base);
        }
        else if (video_index < 0) {
            segment_time = packet.pts * av_q2d(audio_st->time_base);
        }
        else {
          segment_time = prev_segment_time;
        }


        if (segment_time - prev_segment_time >= options.segment_duration) {
            av_write_trailer(oc);   // close ts file and free memory
            avio_flush(oc->pb);
            avio_close(oc->pb);

            if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
                remove_file = 1;
                first_segment++;
            }
            else {
                remove_file = 0;
            }

            if (write_index) {
                write_index = !write_index_file(options, first_segment, ++last_segment, 0);
            }

            if (remove_file) {
                snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, first_segment - 1);
                remove(remove_filename);
            }

            snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, output_index++);
            if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            }

            // Write a new header at the start of each file
            if (avformat_write_header(oc, NULL)) {
              fprintf(stderr, "Could not write mpegts header to first output file\n");
              exit(1);
            }

            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        }
        else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (!decode_done);

    av_write_trailer(oc);

    if (video_st) {
      avcodec_close(video_st->codec);
    }

    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);
    av_free(oc);

    if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
        remove_file = 1;
        first_segment++;
    }
    else {
        remove_file = 0;
    }

    if (write_index) {
        write_index_file(options, first_segment, ++last_segment, 1);
    }

    if (remove_file) {
        snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, first_segment - 1);
        remove(remove_filename);
    }

    return 0;
}
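Each segment boundary inside the read loop above performs the same four steps: write the trailer, flush and close the current .ts file, open the next one, and write a fresh header. Condensed into one helper for readability (a sketch with an illustrative name, mirroring the libavformat calls used above):

/* Finish the current segment and open the next one; returns 0 on success. */
static int start_next_segment(AVFormatContext *oc, const char *prefix,
                              unsigned int *output_index)
{
    char name[1024];

    av_write_trailer(oc);            /* close the current .ts and free muxer state */
    avio_flush(oc->pb);
    avio_close(oc->pb);

    snprintf(name, sizeof(name), "%s-%u.ts", prefix, (*output_index)++);
    if (avio_open(&oc->pb, name, AVIO_FLAG_WRITE) < 0)
        return -1;
    return avformat_write_header(oc, NULL);  /* each segment gets its own header */
}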
Пример #19
0
MediaRet MediaRecorder::AddFrame(const u8 *vid)
{
    if(!oc || !vid_st)
	return MRET_OK;

    AVCodecContext *ctx = vid_st->codec;
    AVPacket pkt;

    // strip borders.  inconsistent between depths for some reason
    // but fortunately consistent between gb/gba.
    int tbord, rbord;
    switch(pixsize) {
    case 2:
	//    16-bit: 2 @ right, 1 @ top
	tbord = 1; rbord = 2; break;
    case 3:
	//    24-bit: no border
	tbord = rbord = 0; break;
    case 4:
	//    32-bit: 1 @ right, 1 @ top
	tbord = 1; rbord = 1; break;
    default:
	// unexpected depth: assume no border so the values are initialized
	tbord = rbord = 0; break;
    }
    avpicture_fill((AVPicture *)pic, (uint8_t *)vid + tbord * (linesize + pixsize * rbord),
		   (PixelFormat)pixfmt, ctx->width + rbord, ctx->height);
    // satisfy stupid sws_scale()'s integrity check
    pic->data[1] = pic->data[2] = pic->data[3] = pic->data[0];
    pic->linesize[1] = pic->linesize[2] = pic->linesize[3] = pic->linesize[0];

    AVFrame *f = pic;

    if(converter) {
	sws_scale(converter, pic->data, pic->linesize, 0, ctx->height,
		  convpic->data, convpic->linesize);
	f = convpic;
    }
    av_init_packet(&pkt);
    pkt.stream_index = vid_st->index;
    if(oc->oformat->flags & AVFMT_RAWPICTURE) {
	// this won't work due to border
	// not sure what formats set this, anyway
	pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = f->data[0];
	pkt.size = linesize * ctx->height;
    } else {
	pkt.size = avcodec_encode_video(ctx, video_buf, VIDEO_BUF_LEN, f);
	if(!pkt.size)
	    return MRET_OK;
	if(ctx->coded_frame && ctx->coded_frame->pts != AV_NOPTS_VALUE)
	    pkt.pts = av_rescale_q(ctx->coded_frame->pts, ctx->time_base, vid_st->time_base);
	if(pkt.size > VIDEO_BUF_LEN) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_BUFSIZE;
	}
	if(ctx->coded_frame->key_frame)
	    pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = video_buf;
    }
    if(av_interleaved_write_frame(oc, &pkt) < 0) {
	avformat_free_context(oc);
	oc = NULL;
	// yeah, err might not be a file error, but if it isn't, it's a
	// coding error rather than a user-controllable error
	// and better resolved using debugging
	return MRET_ERR_FERR;
    }
    return MRET_OK;
}
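avcodec_encode_video() used by AddFrame() above was deprecated and later removed; on current FFmpeg the encode step goes through avcodec_send_frame()/avcodec_receive_packet(). A minimal sketch of that pattern (not the project's actual code), assuming ctx is an opened encoder and st the output stream:

#include <libavcodec/avcodec.h>

/* Encode one frame with the post-3.1 send/receive API and mux every
 * packet it produces.  Pass f = NULL to flush the encoder. */
static int encode_and_mux(AVCodecContext *ctx, AVFrame *f,
                          AVFormatContext *oc, AVStream *st)
{
    int ret = avcodec_send_frame(ctx, f);
    if (ret < 0)
        return ret;

    for (;;) {
        AVPacket *pkt = av_packet_alloc();
        if (!pkt)
            return AVERROR(ENOMEM);
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_packet_free(&pkt);
            return 0;
        }
        if (ret < 0) {
            av_packet_free(&pkt);
            return ret;
        }
        av_packet_rescale_ts(pkt, ctx->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_interleaved_write_frame(oc, pkt);
        av_packet_free(&pkt);
        if (ret < 0)
            return ret;
    }
}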
Пример #20
0
int CFfmpeg::ReadData(int nNeed, unsigned char* pBuffer, int* pnRead)
{
	int buffer_read = 0;
	int ret;
	AVPacket pkt;

	pthread_mutex_lock(&iolock);
	if(!transcode)
	{
		ret = fread(pBuffer, 1, nNeed, m_pFp);
		if(ret<=0)
		{
			FFMPEG_ERROR("fread: %s", strerror(errno));
		}
		else
		{
			curpos += ret;
			buffer_read = ret;
		}
	}
	else
	{
		while(1)
		{
			ret = ring_buffer_datasize(&outputringbuffer);
			if(ret > 0)
			{
				int reqsize = nNeed - buffer_read;
				if(ret < reqsize)
					reqsize = ret;
				ret = ring_buffer_read(&outputringbuffer, pBuffer+buffer_read, reqsize);
				if(ret == 0)
				{
					buffer_read += reqsize;
				}
			}
			if(buffer_read >= nNeed)
				break;
			ret = av_read_frame(infmt_ctx, &pkt);
			if(ret == AVERROR_EOF)
			{
				FFMPEG_INFO("=== AVERROR_EOF");
				eof = 1;
				*pnRead = buffer_read;
				//pthread_mutex_lock(&iolock);
				curpos = avio_seek(infmt_ctx->pb, 0, SEEK_CUR);
				pthread_mutex_unlock(&iolock);
				return buffer_read;
			}
			else if(ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if(ret < 0)
			{
				FFMPEG_WARN("av_read_frame return %d", ret);
				*pnRead = buffer_read;
				//pthread_mutex_lock(&iolock);
				curpos = avio_seek(infmt_ctx->pb, 0, SEEK_CUR);
				pthread_mutex_unlock(&iolock);
				return buffer_read;
			}
			else if(ret >= 0)
			{
				AVStream *ost, *ist;
				AVFrame avframe;
				AVPacket opkt;
				int64_t pts_base;
				av_init_packet(&opkt);
				//opkt = pkt;
				if(pkt.stream_index == video)
				{
					ost = vst;
					ist = infmt_ctx->streams[video];
					opkt.stream_index = vst->index;
					pts_base = vdts_base;
					//printf("pts=%lld, dts=%lld, duration=%d\n", pkt.pts, pkt.dts, pkt.duration);
				}
				else if(pkt.stream_index == audio1)
				{
					ost = ast1;
					ist = infmt_ctx->streams[audio1];
					opkt.stream_index = ast1->index;
					pts_base = a1dts_base;

					if(acodec1)
					{
						uint8_t outbuffer[4096];
						int sample_size = sizeof(adecbuffer1);
						int frame_size = ost->codec->frame_size * 4;
						//FFMPEG_DEBUG("before decode, pts=0x%llx, dts=0x%llx", pkt.pts, pkt.dts);
						avcodec_decode_audio3(ist->codec, adecbuffer1, &sample_size, &pkt);
						//FFMPEG_DEBUG("after decode, pts=0x%llx, dts=0x%llx", pkt.pts, pkt.dts);
						ring_buffer_write(&adecrbuffer1, (uint8_t*)adecbuffer1, sample_size);
						while(ring_buffer_datasize(&adecrbuffer1) > frame_size)
						{
							av_init_packet(&opkt);
							ring_buffer_read(&adecrbuffer1, (uint8_t*)adecbuffer1, frame_size);
							ret = avcodec_encode_audio(ost->codec, outbuffer, sizeof(outbuffer), adecbuffer1);
							//printf("ret=%d\n", ret);
							opkt.data = outbuffer;
							opkt.size = ret;
							opkt.stream_index = ast1->index;
							if(pkt.pts != AV_NOPTS_VALUE)
								opkt.pts = av_rescale_q(pkt.pts, ist->time_base, ost->time_base)+pts_base;
							else
								opkt.pts = AV_NOPTS_VALUE;
							if (pkt.dts != AV_NOPTS_VALUE)
								opkt.dts = av_rescale_q(pkt.dts, ist->time_base, ost->time_base)+pts_base;
							else
								opkt.dts = av_rescale_q(pkt.dts, AV_TIME_BASE_Q, ost->time_base)+pts_base;
							opkt.duration = av_rescale_q(pkt.duration, ist->time_base, ost->time_base);
							opkt.flags |= AV_PKT_FLAG_KEY;
							//FFMPEG_DEBUG("audio1 rescaled, pts=0x%llx, dts=0x%llx", opkt.pts, opkt.dts);
							ret = av_interleaved_write_frame(oc, &opkt);
							if(ret != 0)
							{
								FFMPEG_ERROR("av_interleaved_write_frame ret %d", ret);
							}
							ost->codec->frame_number++;
							av_free_packet(&opkt);
						}
						av_free_packet(&pkt);
						continue;
					}
				}
				else if(pkt.stream_index == audio2)
				{
					ost = ast2;
					ist = infmt_ctx->streams[audio2];
					opkt.stream_index = ast2->index;
					pts_base = a2dts_base;

					if(acodec2)
					{
						uint8_t outbuffer[4096];
						int sample_size = sizeof(adecbuffer2);
						int frame_size = ost->codec->frame_size * 4;
						avcodec_decode_audio3(ist->codec, adecbuffer2, &sample_size, &pkt);
						ring_buffer_write(&adecrbuffer2, (uint8_t*)adecbuffer2, sample_size);
						while(ring_buffer_datasize(&adecrbuffer2) > frame_size)
						{
							av_init_packet(&opkt);
							ring_buffer_read(&adecrbuffer2, (uint8_t*)adecbuffer2, frame_size);
							ret = avcodec_encode_audio(ost->codec, outbuffer, sizeof(outbuffer), adecbuffer2);
							//printf("ret=%d\n", ret);
							opkt.data = outbuffer;
							opkt.size = ret;
							opkt.stream_index = ast2->index;
							if(pkt.pts != AV_NOPTS_VALUE)
								opkt.pts = av_rescale_q(pkt.pts, ist->time_base, ost->time_base)+pts_base;
							else
								opkt.pts = AV_NOPTS_VALUE;
							if (pkt.dts != AV_NOPTS_VALUE)
								opkt.dts = av_rescale_q(pkt.dts, ist->time_base, ost->time_base)+pts_base;
							else
								opkt.dts = av_rescale_q(pkt.dts, AV_TIME_BASE_Q, ost->time_base)+pts_base;
							opkt.duration = av_rescale_q(pkt.duration, ist->time_base, ost->time_base);
							opkt.flags |= AV_PKT_FLAG_KEY;
							ret = av_interleaved_write_frame(oc, &opkt);
							if(ret != 0)
							{
								FFMPEG_ERROR("av_interleaved_write_frame ret %d", ret);
							}
							ost->codec->frame_number++;
							av_free_packet(&opkt);
						}
						av_free_packet(&pkt);
						continue;
					}
				}
				else
				{
					av_free_packet(&pkt);
					continue;
				}

				avcodec_get_frame_defaults(&avframe);
				ost->codec->coded_frame = &avframe;
				avframe.key_frame = pkt.flags & AV_PKT_FLAG_KEY;
				if(pkt.pts != AV_NOPTS_VALUE)
					opkt.pts = av_rescale_q(pkt.pts, ist->time_base, ost->time_base)+pts_base;
				else
					opkt.pts = AV_NOPTS_VALUE;
				if (pkt.dts != AV_NOPTS_VALUE)
					opkt.dts = av_rescale_q(pkt.dts, ist->time_base, ost->time_base)+pts_base;
				else
					opkt.dts = av_rescale_q(pkt.dts, AV_TIME_BASE_Q, ost->time_base)+pts_base;
				opkt.duration = av_rescale_q(pkt.duration, ist->time_base, ost->time_base);
				opkt.data = pkt.data;
				opkt.size = pkt.size;
				if(bsfc && pkt.stream_index == video)
				{
					//printf("rescale pts=%lld, dts=%lld, duration=%d\n", opkt.pts, opkt.dts, opkt.duration);
					AVPacket new_pkt = opkt;
					int a = av_bitstream_filter_filter(bsfc, ost->codec, NULL, &new_pkt.data, &new_pkt.size, opkt.data, opkt.size, opkt.flags & AV_PKT_FLAG_KEY);
					if(a>0)
					{
						av_free_packet(&opkt);
						new_pkt.destruct = av_destruct_packet;
					}
					else if(a<0)
					{
						FFMPEG_ERROR("av_bitstream_filter_filter ret %d", a);
					}
					opkt = new_pkt;
				}
				ret = av_interleaved_write_frame(oc, &opkt);
				if(ret != 0)
				{
					FFMPEG_ERROR("av_interleaved_write_frame ret %d", ret);
				}
				ost->codec->frame_number++;
			}
			av_free_packet(&pkt);
		}
		curpos = avio_seek(infmt_ctx->pb, 0, SEEK_CUR);
	}
	*pnRead = buffer_read;
	pthread_mutex_unlock(&iolock);
	return buffer_read;
}
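The av_bitstream_filter_filter() call above uses an API that was replaced by AVBSF in FFmpeg 3.x and removed later. A sketch of the setup under the newer API, assuming the common h264_mp4toannexb case (the listing never shows how bsfc was created, so the filter name is an assumption):

#include <libavcodec/bsf.h>

/* Allocate and initialize an h264_mp4toannexb filter for one stream. */
static AVBSFContext *make_annexb_bsf(const AVCodecParameters *par)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf = NULL;

    if (!f || av_bsf_alloc(f, &bsf) < 0)
        return NULL;
    if (avcodec_parameters_copy(bsf->par_in, par) < 0 ||
        av_bsf_init(bsf) < 0) {
        av_bsf_free(&bsf);
        return NULL;
    }
    /* feed packets with av_bsf_send_packet() and drain them with
     * av_bsf_receive_packet() in place of av_bitstream_filter_filter() */
    return bsf;
}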
Пример #21
0
HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame)
{
    void *frameBytes;
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;

    frameCount++;
    // Handle Video Frame
    if(videoFrame)
    {
        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource)
        {
            fprintf(stderr, "Frame received (#%lu) - No input signal detected\n", frameCount);
            return S_OK;
        } else {
            AVPacket pkt;
            AVCodecContext *c;
            av_init_packet(&pkt);
            c = video_st->codec;
            //fprintf(stderr, "Frame received (#%lu) - Valid Frame (Size: %li bytes)\n", frameCount, videoFrame->GetRowBytes() * videoFrame->GetHeight());
            videoFrame->GetBytes(&frameBytes);
            avpicture_fill((AVPicture*)picture, (uint8_t *)frameBytes,
                           PIX_FMT_UYVY422,
                           videoFrame->GetWidth(), videoFrame->GetHeight());
            videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                      video_st->time_base.den);
            pkt.pts = pkt.dts = frameTime/video_st->time_base.num;
            pkt.duration = frameDuration;
            //To be made sure it still applies
            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= (uint8_t *)frameBytes;
            pkt.size= videoFrame->GetRowBytes() * videoFrame->GetHeight();
	    //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
	    c->frame_number++;
            av_interleaved_write_frame(oc, &pkt);
            //write(videoOutputFile, frameBytes, videoFrame->GetRowBytes() * videoFrame->GetHeight());
        }
//        frameCount++;

        if (g_maxFrames > 0 && frameCount >= g_maxFrames)
        {
            pthread_cond_signal(&sleepCond);
        }
    }

    // Handle Audio Frame
    if (audioFrame)
    {
            AVCodecContext *c;
            AVPacket pkt;
	    BMDTimeValue audio_pts;
            av_init_packet(&pkt);

            c = audio_st->codec;
            //hack among hacks
            pkt.size =  audioFrame->GetSampleFrameCount() *
                             g_audioChannels * (g_audioSampleDepth / 8);
            audioFrame->GetBytes(&audioFrameBytes);
            audioFrame->GetPacketTime(&audio_pts, audio_st->time_base.den);
	    pkt.dts = pkt.pts= audio_pts/audio_st->time_base.num;
	    //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= audio_st->index;
            pkt.data = (uint8_t *)audioFrameBytes;
//            pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
	    c->frame_number++;
            //write(audioOutputFile, audioFrameBytes, audioFrame->GetSampleFrameCount() * g_audioChannels * (g_audioSampleDepth / 8));
            if (av_interleaved_write_frame(oc, &pkt) != 0) {
                fprintf(stderr, "Error while writing audio frame\n");
                exit(1);
            }
    }
    return S_OK;
}
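GetStreamTime() above is asked to report time in units of time_base.den ticks per second, so dividing by time_base.num yields stream time-base units. The same conversion written with av_rescale_q() makes the unit change explicit; a sketch over the same variables, which additionally rescales the duration (the code above leaves pkt.duration in capture ticks, a no-op only when time_base.num is 1):

/* frameTime and frameDuration arrive in 1/time_base.den second ticks */
AVRational tick = { 1, video_st->time_base.den };
pkt.pts = pkt.dts = av_rescale_q(frameTime, tick, video_st->time_base);
pkt.duration = av_rescale_q(frameDuration, tick, video_st->time_base);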
Пример #22
0
/**
* @brief 
*
* @return 
*/
int Mp4FileOutput::run()
{
    //const int MAX_EVENT_HEAD_AGE = 2;    ///< Number of seconds of video before event to save
    const int MAX_EVENT_TAIL_AGE = 3;    ///< Number of seconds of video after event to save

    typedef enum { IDLE, PREALARM, ALARM, ALERT } AlarmState;

    if ( waitForProviders() )
    {
        /* auto detect the output format from the name. default is mpeg. */
        AVOutputFormat *outputFormat = av_guess_format( mExtension.c_str(), NULL, NULL );
        if ( !outputFormat )
            Fatal( "Could not deduce output format from '%s'", mExtension.c_str() );
        //AVFormatContext *outputContext = openFile( outputFormat );
        AVFormatContext *outputContext = NULL;

        double videoTimeOffset = 0.0L;
        uint64_t videoFrameCount = 0;
        AlarmState alarmState = IDLE;
        uint64_t alarmTime = 0;
        int eventCount = 0;
        while( !mStop )
        {
            while( !mStop )
            {
                mQueueMutex.lock();
                if ( !mFrameQueue.empty() )
                {
                    for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                    {
                        const FeedFrame *frame = iter->get();

                        Debug( 3, "Frame type %d", frame->mediaType() );
                        if ( frame->mediaType() == FeedFrame::FRAME_TYPE_VIDEO )
                        {
                        	// This is an alarm detection frame
                            const MotionFrame *motionFrame = dynamic_cast<const MotionFrame *>(frame);
                            //const VideoProvider *provider = dynamic_cast<const VideoProvider *>(frame->provider());

                            AlarmState lastAlarmState = alarmState;
                            uint64_t now = time64();

                            Debug( 3, "Motion frame, alarmed %d", motionFrame->alarmed() );
                            if ( motionFrame->alarmed() )
                            {
                                alarmState = ALARM;
                                alarmTime = now;
                                if ( lastAlarmState == IDLE )
                                {
                                    // Create new event
                                    eventCount++;
                                    std::string path = stringtf( "%s/img-%s-%d-%ju.jpg", mLocation.c_str(), mName.c_str(), eventCount, motionFrame->id() );
                                    //Info( "PF:%d @ %dx%d", motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height() );
                                    Image image( motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height(), motionFrame->buffer().data() );
                                    image.writeJpeg( path.c_str() );
                                }
                            }
                            else if ( lastAlarmState == ALARM )
                            {
                                alarmState = ALERT;
                            }
                            else if ( lastAlarmState == ALERT )
                            {
                            	Debug( 3, "Frame age %.2lf", frame->age( alarmTime ) );
                                if ( (0.0l-frame->age( alarmTime )) > MAX_EVENT_TAIL_AGE )
                                    alarmState = IDLE;
                            }
                            else
                            {
                            	alarmState = IDLE;
                            }
                            Debug( 3, "Alarm state %d (%d)", alarmState, lastAlarmState );
                        }
                        else
                        {
							bool keyFrame = false;
							const uint8_t *startPos = h264StartCode( frame->buffer().head(), frame->buffer().tail() );
							while ( startPos < frame->buffer().tail() )
							{
								while( !*(startPos++) )
									;
								const uint8_t *nextStartPos = h264StartCode( startPos, frame->buffer().tail() );

								int frameSize = nextStartPos-startPos;

								unsigned char type = startPos[0] & 0x1F;
								unsigned char nri = startPos[0] & 0x60;
								Debug( 3, "Frame Type %d, NRI %d (%02x), %d bytes, ts %jd", type, nri>>5, startPos[0], frameSize, frame->timestamp() );

								if ( type == NAL_IDR_SLICE )
									keyFrame = true;
								startPos = nextStartPos;
							}

							videoTimeOffset += (double)mVideoParms.frameRate().num / mVideoParms.frameRate().den;

							if ( keyFrame )
							{
								// We can do file opening/closing now
								if ( alarmState != IDLE && !outputContext )
								{
									outputContext = openFile( outputFormat );
									videoTimeOffset = 0.0L;
									videoFrameCount = 0;
								}
								else if ( alarmState == IDLE && outputContext )
								{
									closeFile( outputContext );
									outputContext = NULL;
								}
							}
							/*if ( keyFrame && (videoTimeOffset >= mMaxLength) )
							{
								closeFile( outputContext );
								outputContext = openFile( outputFormat );
								videoTimeOffset = 0.0L;
								videoFrameCount = 0;
							}*/

							if ( outputContext )
							{
								AVStream *videoStream = outputContext->streams[0];
								AVCodecContext *videoCodecContext = videoStream->codec;

								AVPacket packet;
								av_init_packet(&packet);

								packet.flags |= keyFrame ? AV_PKT_FLAG_KEY : 0;
								packet.stream_index = videoStream->index;
								packet.data = (uint8_t*)frame->buffer().data();
								packet.size = frame->buffer().size();
								//packet.pts = packet.dts = AV_NOPTS_VALUE;
								packet.pts = packet.dts = (videoFrameCount * mVideoParms.frameRate().num * videoCodecContext->time_base.den) / (mVideoParms.frameRate().den * videoCodecContext->time_base.num);
								Info( "vfc: %ju, vto: %.2lf, kf: %d, pts: %jd", videoFrameCount, videoTimeOffset, keyFrame, packet.pts );

								int result = av_interleaved_write_frame(outputContext, &packet);
								if ( result != 0 )
									Fatal( "Error while writing video frame: %d", result );
							}

							videoFrameCount++;
                        }
                    }
                    mFrameQueue.clear();
                }
                mQueueMutex.unlock();
                checkProviders();
                usleep( INTERFRAME_TIMEOUT );
            }
        }
        if ( outputContext )
        	closeFile( outputContext );
    }
    cleanup();
    return 0;
}
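The NAL scanning loop above depends on an h264StartCode() helper that is not shown. Judging by how the loop consumes its result (it skips the zero bytes and the 0x01 itself before reading the NAL header), the helper returns a pointer to the first byte of an Annex-B start code, or the end pointer when none remains. A hypothetical sketch consistent with that usage:

/* Hypothetical: scan [p, end) for an Annex-B start code (00 00 01 or
 * 00 00 00 01) and return a pointer to its first zero byte, or 'end'. */
static const uint8_t *h264StartCode(const uint8_t *p, const uint8_t *end)
{
    for (; p + 3 <= end; p++) {
        if (p[0] == 0 && p[1] == 0 &&
            (p[2] == 1 || (p[2] == 0 && p + 4 <= end && p[3] == 1)))
            return p;
    }
    return end;
}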
Пример #23
0
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frames to compress. The codec has a latency of a few
           frames if using B-frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
Пример #24
0
int VideoFFmpegWriter::execute( boost::uint8_t* in_buffer, int in_width, int in_height, PixelFormat in_pixelFormat )
{
	_error = IGNORE_FINISH;

	AVOutputFormat* fmt = 0;
	fmt = guess_format( _format.c_str(), NULL, NULL );
	if( !fmt )
	{
		fmt = guess_format( NULL, filename().c_str(), NULL );
		if( !fmt )
		{
			std::cerr << "ffmpegWriter: could not deduce output format from file extension." << std::endl;
			return false;
		}
	}

	if( !_avformatOptions )
		_avformatOptions = avformat_alloc_context();

	_avformatOptions->oformat = fmt;
	snprintf( _avformatOptions->filename, sizeof( _avformatOptions->filename ), "%s", filename().c_str() );

	if( !_stream )
	{
		_stream = av_new_stream( _avformatOptions, 0 );
		if( !_stream )
		{
			std::cout << "ffmpegWriter: out of memory." << std::endl;
			return false;
		}

		CodecID codecId    = fmt->video_codec;
		AVCodec* userCodec = avcodec_find_encoder_by_name( _codec.c_str() );
		if( userCodec )
			codecId = userCodec->id;

		_stream->codec->codec_id           = codecId;
		_stream->codec->codec_type         = CODEC_TYPE_VIDEO;
		_stream->codec->bit_rate           = _bitRate;
		_stream->codec->bit_rate_tolerance = _bitRateTolerance;
		_stream->codec->width              = width();
		_stream->codec->height             = height();
		_stream->codec->time_base          = av_d2q( 1.0 / _fps, 100 );
		_stream->codec->gop_size           = _gopSize;
		if( _bFrames )
		{
			_stream->codec->max_b_frames     = _bFrames;
			_stream->codec->b_frame_strategy = 0;
			_stream->codec->b_quant_factor   = 2.0;
		}
		_stream->codec->mb_decision = _mbDecision;
		_stream->codec->pix_fmt     = _out_pixelFormat;

		if( !strcmp( _avformatOptions->oformat->name, "mp4" ) || !strcmp( _avformatOptions->oformat->name, "mov" ) || !strcmp( _avformatOptions->oformat->name, "3gp" ) || !strcmp( _avformatOptions->oformat->name, "flv" ) )
			_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

		if( av_set_parameters( _avformatOptions, NULL ) < 0 )
		{
			std::cout << "ffmpegWriter: unable to set parameters." << std::endl;
			freeFormat();
			return false;
		}

		dump_format( _avformatOptions, 0, filename().c_str(), 1 );

		AVCodec* videoCodec = avcodec_find_encoder( codecId );
		if( !videoCodec )
		{
			std::cout << "ffmpegWriter: unable to find codec." << std::endl;
			freeFormat();
			return false;
		}

		if( avcodec_open( _stream->codec, videoCodec ) < 0 )
		{
			std::cout << "ffmpegWriter: unable to open codec." << std::endl;
			freeFormat();
			return false;
		}

		if( !( fmt->flags & AVFMT_NOFILE ) )
		{
			if( url_fopen( &_avformatOptions->pb, filename().c_str(), URL_WRONLY ) < 0 )
			{
				std::cout << "ffmpegWriter: unable to open file." << std::endl;
				return false;
			}
		}

		av_write_header( _avformatOptions );
	}

	_error = CLEANUP;

	AVFrame* in_frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults( in_frame );
	avpicture_fill( (AVPicture*)in_frame, in_buffer, in_pixelFormat, in_width, in_height );

	AVFrame* out_frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults( out_frame );
	int out_picSize            = avpicture_get_size( _out_pixelFormat, width(), height() );
	boost::uint8_t* out_buffer = (boost::uint8_t*) av_malloc( out_picSize );
	avpicture_fill( (AVPicture*) out_frame, out_buffer, _out_pixelFormat, width(), height() );

	_sws_context = sws_getCachedContext( _sws_context, in_width, in_height, in_pixelFormat, width(), height(), _out_pixelFormat, SWS_BICUBIC, NULL, NULL, NULL );

	std::cout << "ffmpegWriter: input format: " << pixelFormat_toString( in_pixelFormat ) << std::endl;
	std::cout << "ffmpegWriter: output format: " << pixelFormat_toString( _out_pixelFormat ) << std::endl;

	if( !_sws_context )
	{
		std::cout << "ffmpeg-conversion failed (" << in_pixelFormat << "->" << _out_pixelFormat << ")." << std::endl;
		return false;
	}
	int error = sws_scale( _sws_context, in_frame->data, in_frame->linesize, 0, height(), out_frame->data, out_frame->linesize );
	if( error < 0 )
	{
		std::cout << "ffmpeg-conversion failed (" << in_pixelFormat << "->" << _out_pixelFormat << ")." << std::endl;
		return false;
	}

	int ret = 0;
	if( ( _avformatOptions->oformat->flags & AVFMT_RAWPICTURE ) != 0 )
	{
		AVPacket pkt;
		av_init_packet( &pkt );
		pkt.flags       |= PKT_FLAG_KEY;
		pkt.stream_index = _stream->index;
		pkt.data         = (boost::uint8_t*) out_frame;
		pkt.size         = sizeof( AVPicture );
		ret              = av_interleaved_write_frame( _avformatOptions, &pkt );
	}
	else
	{
		boost::uint8_t* out_buffer = (boost::uint8_t*) av_malloc( out_picSize );

		ret = avcodec_encode_video( _stream->codec, out_buffer, out_picSize, out_frame );

		if( ret > 0 )
		{
			AVPacket pkt;
			av_init_packet( &pkt );

			if( _stream->codec->coded_frame && _stream->codec->coded_frame->pts != static_cast<boost::int64_t>( AV_NOPTS_VALUE ) )
				pkt.pts = av_rescale_q( _stream->codec->coded_frame->pts, _stream->codec->time_base, _stream->time_base );

			if( _stream->codec->coded_frame && _stream->codec->coded_frame->key_frame )
				pkt.flags |= PKT_FLAG_KEY;

			pkt.stream_index = _stream->index;
			pkt.data         = out_buffer;
			pkt.size         = ret;
			ret              = av_interleaved_write_frame( _avformatOptions, &pkt );
		}

		av_free( out_buffer );
	}

	av_free( out_buffer );
	av_free( out_frame );
	av_free( in_frame );
	// in_buffer is not freed here (function parameter, owned by the caller)

	if( ret )
	{
		std::cout << "ffmpegWriter: error writing frame to file." << std::endl;
		return false;
	}

	_error = SUCCESS;
	return true;
}
Пример #25
0
int main(int argc, char **argv)
{
    const char *input;
    const char *output_prefix;
    double segment_duration;
    char *segment_duration_check;
    const char *index;
    char *tmp_index;
    const char *http_prefix;
    long max_tsfiles = 0;
    char *max_tsfiles_check;
    double prev_segment_time = 0;
    unsigned int output_index = 1;
    AVInputFormat *ifmt;
    AVOutputFormat *ofmt;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc;
    AVStream *video_st;
    AVStream *audio_st;
    AVCodec *codec;
    char *output_filename;
    char *remove_filename;
    int video_index;
    int audio_index;
    unsigned int first_segment = 1;
    unsigned int last_segment = 0;
    int write_index = 1;
    int decode_done;
    char *dot;
    int ret;
    int i;
    int remove_file;

    if (argc < 6 || argc > 7) {
        fprintf(stderr, "Usage: %s <input MPEG-TS file> <segment duration in seconds> <output MPEG-TS file prefix> <output m3u8 index file> <http prefix> [<segment window size>]\n", argv[0]);
        exit(1);
    }

    av_register_all();

    input = argv[1];
    if (!strcmp(input, "-")) {
        input = "pipe:";
    }
    segment_duration = strtod(argv[2], &segment_duration_check);
    if (segment_duration_check == argv[2] || segment_duration == HUGE_VAL || segment_duration == -HUGE_VAL) {
        fprintf(stderr, "Segment duration time (%s) invalid\n", argv[2]);
        exit(1);
    }
    output_prefix = argv[3];
    index = argv[4];
    http_prefix=argv[5];
    if (argc == 7) {
        max_tsfiles = strtol(argv[6], &max_tsfiles_check, 10);
        if (max_tsfiles_check == argv[6] || max_tsfiles < 0 || max_tsfiles >= INT_MAX) {
            fprintf(stderr, "Maximum number of ts files (%s) invalid\n", argv[6]);
            exit(1);
        }
    }

    remove_filename = malloc(sizeof(char) * (strlen(output_prefix) + 15));
    if (!remove_filename) {
        fprintf(stderr, "Could not allocate space for remove filenames\n");
        exit(1);
    }

    output_filename = malloc(sizeof(char) * (strlen(output_prefix) + 15));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        exit(1);
    }

    tmp_index = malloc(strlen(index) + 2);
    if (!tmp_index) {
        fprintf(stderr, "Could not allocate space for temporary index filename\n");
        exit(1);
    }

    strncpy(tmp_index, index, strlen(index) + 2);
    dot = strrchr(tmp_index, '/');
    dot = dot ? dot + 1 : tmp_index;
    for (i = strlen(tmp_index) + 1; i > dot - tmp_index; i--) {
        tmp_index[i] = tmp_index[i - 1];
    }
    *dot = '.';

    ifmt = av_find_input_format("mpegts");
    if (!ifmt) {
        fprintf(stderr, "Could not find MPEG-TS demuxer\n");
        exit(1);
    }

    ret = av_open_input_file(&ic, input, ifmt, 0, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret);
        exit(1);
    }

    if (av_find_stream_info(ic) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        exit(1);
    }

    ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        exit(1);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocated output context");
        exit(1);
    }
    oc->oformat = ofmt;

    video_index = -1;
    audio_index = -1;

    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                video_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                video_st = add_output_stream(oc, ic->streams[i]);
                break;
            case AVMEDIA_TYPE_AUDIO:
                audio_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                audio_st = add_output_stream(oc, ic->streams[i]);
                break;
            default:
                ic->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, output_prefix, 1);

    codec = avcodec_find_decoder(video_st->codec->codec_id);
    if (!codec) {
        fprintf(stderr, "Could not find video decoder, key frames will not be honored\n");
    }

    if (avcodec_open(video_st->codec, codec) < 0) {
        fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
    }

    snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, output_index++);
    if (url_fopen(&oc->pb, output_filename, URL_WRONLY) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        exit(1);
    }

    if (av_write_header(oc)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        exit(1);
    }

    write_index = !write_index_file(index, tmp_index, segment_duration, output_prefix, http_prefix, first_segment, last_segment, 0, max_tsfiles);

    do {
        double segment_time;
        AVPacket packet;

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }

        if (av_dup_packet(&packet) < 0) {
            fprintf(stderr, "Could not duplicate packet");
            av_free_packet(&packet);
            break;
        }

        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) {
            segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        }
        else if (video_index < 0) {
            segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        }
        else {
            segment_time = prev_segment_time;
        }

        if (segment_time - prev_segment_time >= segment_duration) {
            put_flush_packet(oc->pb);
            url_fclose(oc->pb);

            if (max_tsfiles && (int)(last_segment - first_segment) >= max_tsfiles - 1) {
                remove_file = 1;
                first_segment++;
            }
            else {
                remove_file = 0;
            }

            if (write_index) {
                write_index = !write_index_file(index, tmp_index, segment_duration, output_prefix, http_prefix, first_segment, ++last_segment, 0, max_tsfiles);
            }

            if (remove_file) {
                snprintf(remove_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, first_segment - 1);
                remove(remove_filename);
            }

            snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, output_index++);
            if (url_fopen(&oc->pb, output_filename, URL_WRONLY) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            }

            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        }
        else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (!decode_done);

    av_write_trailer(oc);

    avcodec_close(video_st->codec);

    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    url_fclose(oc->pb);
    av_free(oc);

    if (max_tsfiles && (int)(last_segment - first_segment) >= max_tsfiles - 1) {
        remove_file = 1;
        first_segment++;
    }
    else {
        remove_file = 0;
    }

    if (write_index) {
        write_index_file(index, tmp_index, segment_duration, output_prefix, http_prefix, first_segment, ++last_segment, 1, max_tsfiles);
    }

    if (remove_file) {
        snprintf(remove_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, first_segment - 1);
        remove(remove_filename);
    }

    return 0;
}
Пример #26
0
static int write_audio_frame(FFMpegContext *context)
{
	AVCodecContext *c = NULL;
	AVPacket pkt;
	AVFrame *frame = NULL;
	int got_output = 0;

	c = context->audio_stream->codec;

	av_init_packet(&pkt);
	pkt.size = 0;
	pkt.data = NULL;

	AUD_Device_read(context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
	context->audio_time += (double) context->audio_input_samples / (double) c->sample_rate;

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
	frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults(frame);
	frame->pts = context->audio_time / av_q2d(c->time_base);
	frame->nb_samples = context->audio_input_samples;
	frame->format = c->sample_fmt;
#ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
	frame->channel_layout = c->channel_layout;
#endif

	if (context->audio_deinterleave) {
		int channel, i;
		uint8_t *temp;

		for (channel = 0; channel < c->channels; channel++) {
			for (i = 0; i < frame->nb_samples; i++) {
				memcpy(context->audio_deinterleave_buffer + (i + channel * frame->nb_samples) * context->audio_sample_size,
					   context->audio_input_buffer + (c->channels * i + channel) * context->audio_sample_size, context->audio_sample_size);
			}
		}

		temp = context->audio_deinterleave_buffer;
		context->audio_deinterleave_buffer = context->audio_input_buffer;
		context->audio_input_buffer = temp;
	}

	avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, context->audio_input_buffer,
	                         context->audio_input_samples * c->channels * context->audio_sample_size, 1);

	if (avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
		// XXX error("Error writing audio packet");
		avcodec_free_frame(&frame);
		return -1;
	}

	if (!got_output) {
		avcodec_free_frame(&frame);
		return 0;
	}
#else
	pkt.size = avcodec_encode_audio(c, context->audio_output_buffer, context->audio_outbuf_size, (short *) context->audio_input_buffer);

	if (pkt.size < 0) {
		// XXX error("Error writing audio packet");
		return -1;
	}

	pkt.data = context->audio_output_buffer;
	got_output = 1;
#endif

	if (got_output) {
		if (pkt.pts != AV_NOPTS_VALUE)
			pkt.pts = av_rescale_q(pkt.pts, c->time_base, context->audio_stream->time_base);
		if (pkt.dts != AV_NOPTS_VALUE)
			pkt.dts = av_rescale_q(pkt.dts, c->time_base, context->audio_stream->time_base);
		if (pkt.duration > 0)
			pkt.duration = av_rescale_q(pkt.duration, c->time_base, context->audio_stream->time_base);

		pkt.stream_index = context->audio_stream->index;

		pkt.flags |= AV_PKT_FLAG_KEY;

		if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
			fprintf(stderr, "Error writing audio packet!\n");
			if (frame)
				avcodec_free_frame(&frame);
			return -1;
		}

		av_free_packet(&pkt);
	}

	if (frame)
		avcodec_free_frame(&frame);

	return 0;
}
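
write_audio_frame() above targets the old avcodec_encode_audio2() API. On FFmpeg 3.1 and newer the same encode-then-mux step is normally written with the avcodec_send_frame()/avcodec_receive_packet() pair. The following is a minimal sketch of that loop, not taken from any of the examples here; the names enc, st and mux are placeholders for an opened encoder context, its output stream and the output format context.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Minimal sketch (FFmpeg 3.1+): submit one frame to the encoder, then
 * drain every packet it can produce into the muxer. Pass frame == NULL
 * to flush the encoder at end of stream. */
static int encode_and_mux(AVCodecContext *enc, AVStream *st,
                          AVFormatContext *mux, AVFrame *frame)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;

    for (;;) {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* needs more input, or fully drained */
        if (ret < 0)
            return ret;

        /* encoder timestamps are in enc->time_base; the muxer expects
         * them in the stream time base */
        av_packet_rescale_ts(&pkt, enc->time_base, st->time_base);
        pkt.stream_index = st->index;

        ret = av_interleaved_write_frame(mux, &pkt);
        av_packet_unref(&pkt);
        if (ret < 0)
            return ret;
    }
}

With this API the got_output bookkeeping disappears, because draining the encoder is explicit rather than signalled through an output flag.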
int main(int argc, char **argv)
{
  if(argc != 5)
  {
    fprintf(stderr, "Usage: %s <segment length> <output location> <filename prefix> <encoding profile>\n", argv[0]);
    return 1;
  }

  struct config_info config;

  memset(&config, 0, sizeof(struct config_info));

  config.segment_length = atoi(argv[1]); 
  config.temp_directory = argv[2];
  config.filename_prefix = argv[3];
  config.encoding_profile = argv[4];
  config.input_filename = "pipe://1";

  char *output_filename = malloc(sizeof(char) * (strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10));
  if (!output_filename) 
  {
    fprintf(stderr, "Segmenter error: Could not allocate space for output filenames\n");
    exit(1);
  }

  // ------------------ Done parsing input --------------

  av_register_all();

  AVInputFormat *input_format = av_find_input_format("mpegts");
  if (!input_format) 
  {
    fprintf(stderr, "Segmenter error: Could not find MPEG-TS demuxer\n");
    exit(1);
  }

  AVFormatContext *input_context = NULL;
  int ret = av_open_input_file(&input_context, config.input_filename, input_format, 0, NULL);
  if (ret != 0) 
  {
    fprintf(stderr, "Segmenter error: Could not open input file, make sure it is an mpegts file: %d\n", ret);
    exit(1);
  }

  if (av_find_stream_info(input_context) < 0) 
  {
    fprintf(stderr, "Segmenter error: Could not read stream information\n");
    exit(1);
  }

#if LIBAVFORMAT_VERSION_MAJOR >= 52 && LIBAVFORMAT_VERSION_MINOR >= 45
  AVOutputFormat *output_format = av_guess_format("mpegts", NULL, NULL);
#else
  AVOutputFormat *output_format = guess_format("mpegts", NULL, NULL);
#endif
  if (!output_format) 
  {
    fprintf(stderr, "Segmenter error: Could not find MPEG-TS muxer\n");
    exit(1);
  }

  AVFormatContext *output_context = avformat_alloc_context();
  if (!output_context) 
  {
    fprintf(stderr, "Segmenter error: Could not allocated output context");
    exit(1);
  }
  output_context->oformat = output_format;

  int video_index = -1;
  int audio_index = -1;

  AVStream *video_stream = NULL;
  AVStream *audio_stream = NULL;

  int i;

  for (i = 0; i < input_context->nb_streams && (video_index < 0 || audio_index < 0); i++) 
  {
    switch (input_context->streams[i]->codec->codec_type) {
      case CODEC_TYPE_VIDEO:
        video_index = i;
        input_context->streams[i]->discard = AVDISCARD_NONE;
        video_stream = add_output_stream(output_context, input_context->streams[i]);
        break;
      case CODEC_TYPE_AUDIO:
        audio_index = i;
        input_context->streams[i]->discard = AVDISCARD_NONE;
        audio_stream = add_output_stream(output_context, input_context->streams[i]);
        break;
      default:
        input_context->streams[i]->discard = AVDISCARD_ALL;
        break;
    }
  }

  if (av_set_parameters(output_context, NULL) < 0) 
  {
    fprintf(stderr, "Segmenter error: Invalid output format parameters\n");
    exit(1);
  }

  dump_format(output_context, 0, config.filename_prefix, 1);

  if(video_index >= 0)
  {
    AVCodec *codec = avcodec_find_decoder(video_stream->codec->codec_id);
    if (!codec) 
    {
      fprintf(stderr, "Segmenter error: Could not find video decoder, key frames will not be honored\n");
    }

    if (avcodec_open(video_stream->codec, codec) < 0) 
    {
      fprintf(stderr, "Segmenter error: Could not open video decoder, key frames will not be honored\n");
    }
  }

  unsigned int output_index = 1;
  snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++);
  if (url_fopen(&output_context->pb, output_filename, URL_WRONLY) < 0) 
  {
    fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename);
    exit(1);
  }

  if (av_write_header(output_context)) 
  {
    fprintf(stderr, "Segmenter error: Could not write mpegts header to first output file\n");
    exit(1);
  }

  unsigned int first_segment = 1;
  unsigned int last_segment = 0;

  double prev_segment_time = 0;
  int decode_done;
  do 
  {
    double segment_time;
    AVPacket packet;

    decode_done = av_read_frame(input_context, &packet);
    if (decode_done < 0) 
    {
      break;
    }

    if (av_dup_packet(&packet) < 0) 
    {
      fprintf(stderr, "Segmenter error: Could not duplicate packet");
      av_free_packet(&packet);
      break;
    }

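    // take the running timestamp from video key frames, so a segment can only be cut on a key frame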
    if (packet.stream_index == video_index && (packet.flags & PKT_FLAG_KEY)) 
    {
      segment_time = (double)video_stream->pts.val * video_stream->time_base.num / video_stream->time_base.den;
    }
    else if (video_index < 0) 
    {
      segment_time = (double)audio_stream->pts.val * audio_stream->time_base.num / audio_stream->time_base.den;
    }
    else 
    {
      segment_time = prev_segment_time;
    }

    // done writing the current file?
    if (segment_time - prev_segment_time >= config.segment_length) 
    {
      put_flush_packet(output_context->pb);
      url_fclose(output_context->pb);

      output_transfer_command(first_segment, ++last_segment, 0, config.encoding_profile);

      snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++);
      if (url_fopen(&output_context->pb, output_filename, URL_WRONLY) < 0) 
      {
        fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename);
        break;
      }

      prev_segment_time = segment_time;
    }

    ret = av_interleaved_write_frame(output_context, &packet);
    if (ret < 0) 
    {
      fprintf(stderr, "Segmenter error: Could not write frame of stream: %d\n", ret);
    }
    else if (ret > 0) 
    {
      fprintf(stderr, "Segmenter info: End of stream requested\n");
      av_free_packet(&packet);
      break;
    }

    av_free_packet(&packet);
  } while (!decode_done);

  av_write_trailer(output_context);

  if (video_index >= 0) 
  {
    avcodec_close(video_stream->codec);
  }

  for(i = 0; i < output_context->nb_streams; i++) 
  {
    av_freep(&output_context->streams[i]->codec);
    av_freep(&output_context->streams[i]);
  }

  url_fclose(output_context->pb);
  av_free(output_context);

  output_transfer_command(first_segment, ++last_segment, 1, config.encoding_profile);

  return 0;
}
Example #28
0
// internal function implementation
static int split_media_file(char *dst, char *src, __int64 start, __int64 end, PFN_SPC spc)
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ofmt_ctx = NULL;
    AVStream        *istream  = NULL;
    AVStream        *ostream  = NULL;
    AVRational       tbms     = { 1, 1000 };
    AVRational       tbvs     = { 1, 1    };
    int64_t          startpts = -1;
    int64_t          duration = -1;
    int64_t          total    = -1;
    int64_t          current  = -1;
    int              streamidx=  0;
    int              ret      = -1;

    av_register_all();
    avformat_network_init();

    if ((ret = avformat_open_input(&ifmt_ctx, src, 0, 0)) < 0) {
        printf("could not open input file '%s' !", src);
        goto done;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        printf("failed to retrieve input stream information ! \n");
        goto done;
    }

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, dst);
    if (!ofmt_ctx) {
        printf("could not create output context !\n");
        goto done;
    }

    for (unsigned i=0; i<ifmt_ctx->nb_streams; i++) {
        istream = ifmt_ctx->streams[i];
        if (istream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            streamidx = i;
            tbvs      = ifmt_ctx->streams[i]->time_base;
        }

        ostream = avformat_new_stream(ofmt_ctx, istream->codec->codec);
        if (!ostream) {
            printf("failed allocating output stream !\n");
            goto done;
        }

        ret = avcodec_copy_context(ostream->codec, istream->codec);
        if (ret < 0) {
            printf("failed to copy context from input to output stream codec context !\n");
            goto done;
        }

        ostream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
            ostream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
    }

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, dst, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf("could not open output file '%s' !", dst);
            goto done;
        }
    }

    // calculate pts: map the millisecond cut range [start, end] into the video stream time base
    if (start >= 0) {
        startpts = ifmt_ctx->start_time * 1000 / AV_TIME_BASE;
        duration = ifmt_ctx->duration   * 1000 / AV_TIME_BASE;
        total    = duration - start; if (total < 0) total = 1;
        current  = 0;
        start   += startpts;
        end     += startpts;
        start    = av_rescale_q_rnd(start, tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        end      = av_rescale_q_rnd(end  , tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));

        // seek to start position
        av_seek_frame(ifmt_ctx, streamidx, start, AVSEEK_FLAG_BACKWARD);
    } else {
        startpts = ifmt_ctx->start_time * 1000 / AV_TIME_BASE;
        duration = end;
        total    = end;
        current  = 0;
        start    = startpts;
        end     += startpts;
        start    = av_rescale_q_rnd(start, tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        end      = av_rescale_q_rnd(end  , tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    }

    // write header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        printf("error occurred when writing output file header !\n");
        goto done;
    }

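    // copy packets from input to output, rescaling timestamps, until the end position is passed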
    while (!g_exit_remux) {
        AVPacket pkt;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0) {
//          fprintf(stderr, "failed to read frame !\n");
            break;
        }

        // on the reference video stream: stop once pts passes end, and report progress
        if (pkt.stream_index == streamidx) {
            if (pkt.pts > end) {
                g_exit_remux = 1;
                goto next;
            }
            if (spc) {
                current = av_rescale_q_rnd(pkt.pts - start, tbvs, tbms, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
                if (current < 0    ) current = 0;
                if (current > total) current = total;
                spc(current, total);
            }
        }

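        // rescale packet timestamps from the input stream time base to the output stream time base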
        istream = ifmt_ctx->streams[pkt.stream_index];
        ostream = ofmt_ctx->streams[pkt.stream_index];
        pkt.pts = av_rescale_q_rnd(pkt.pts, istream->time_base, ostream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, istream->time_base, ostream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, istream->time_base, ostream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            printf("error muxing packet !\n");
            g_exit_remux = 1;
            goto next;
        }

next:
        av_packet_unref(&pkt);
    }

    // write trailer
    av_write_trailer(ofmt_ctx);

done:
    // close input
    avformat_close_input(&ifmt_ctx);

    // close output
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&ofmt_ctx->pb);
    }

    avformat_free_context(ofmt_ctx);
    avformat_network_deinit();

    // done: report completion (spc may be NULL, as the checks above assume)
    printf("\n");
    if (spc) spc(total, total);
    printf("\ndone.\n");
    return ret;
}
int _tmain(int argc, _TCHAR* argv[])
{
    int ret;
    AVPacket packet;
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    av_register_all();
    avfilter_register_all();
    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;
    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);
        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
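            /* bring the packet timestamps into the decoder time base before decoding */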
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }
            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                     (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                     (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }
    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }
        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }
    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred\n");
    return (ret? 1:0);
}
Example #30
-1
void ffmpegwrite::run(const sginstreams in)
{
	stream_ffmpeg_packet *temp = (stream_ffmpeg_packet*)in[0][0].get();
	av_interleaved_write_frame(this->form_context, &temp->packet);
}
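
run() above discards the return value of av_interleaved_write_frame(), which is 0 on success and a negative AVERROR code on failure. As a closing note that applies to every example on this page, even the smallest wrapper benefits from checking it; a minimal sketch, assuming oc and pkt are an AVFormatContext and AVPacket already set up by the caller:

#include <stdio.h>
#include <libavformat/avformat.h>

/* minimal sketch: forward one packet to the muxer and report failures;
 * 'oc' and 'pkt' are assumed to be fully initialized by the caller */
static int write_packet_checked(AVFormatContext *oc, AVPacket *pkt)
{
    int ret = av_interleaved_write_frame(oc, pkt);
    if (ret < 0)
        fprintf(stderr, "av_interleaved_write_frame: %s\n", av_err2str(ret));
    return ret;
}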