Example #1
int main(int argc, char *argv[])
{
    AVFormatContext *input_ctx = NULL;
    int video_stream, ret;
    AVStream *video = NULL;
    AVCodecContext *decoder_ctx = NULL;
    AVCodec *decoder = NULL;
    AVPacket packet;
    enum AVHWDeviceType type;
    int i;

    if (argc < 4) {
        fprintf(stderr, "Usage: %s <device type> <input file> <output file>\n", argv[0]);
        return -1;
    }

    type = av_hwdevice_find_type_by_name(argv[1]);
    if (type == AV_HWDEVICE_TYPE_NONE) {
        fprintf(stderr, "Device type %s is not supported.\n", argv[1]);
        fprintf(stderr, "Available device types:");
        while((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
            fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
        fprintf(stderr, "\n");
        return -1;
    }

    /* open the input file */
    if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
        fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
        return -1;
    }

    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
        fprintf(stderr, "Cannot find input stream information.\n");
        return -1;
    }

    /* find the video stream information */
    ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file\n");
        return -1;
    }
    video_stream = ret;

    for (i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
        if (!config) {
            fprintf(stderr, "Decoder %s does not support device type %s.\n",
                    decoder->name, av_hwdevice_get_type_name(type));
            return -1;
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
            config->device_type == type) {
            hw_pix_fmt = config->pix_fmt;
            break;
        }
    }

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = input_ctx->streams[video_stream];
    if (avcodec_parameters_to_context(decoder_ctx, video->codecpar) < 0)
        return -1;

    decoder_ctx->get_format  = get_hw_format;
    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);

    if (hw_decoder_init(decoder_ctx, type) < 0)
        return -1;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
        fprintf(stderr, "Failed to open codec for stream #%u\n", video_stream);
        return -1;
    }

    /* open the file to dump raw data */
    output_file = fopen(argv[3], "w+");

    /* actual decoding and dump the raw data */
    while (ret >= 0) {
        if ((ret = av_read_frame(input_ctx, &packet)) < 0)
            break;

        if (video_stream == packet.stream_index)
            ret = decode_write(decoder_ctx, &packet);

        av_packet_unref(&packet);
    }

    /* flush the decoder */
    packet.data = NULL;
    packet.size = 0;
    ret = decode_write(decoder_ctx, &packet);
    av_packet_unref(&packet);

    if (output_file)
        fclose(output_file);
    avcodec_free_context(&decoder_ctx);
    avformat_close_input(&input_ctx);
    av_buffer_unref(&hw_device_ctx);

    return 0;
}
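The main() above relies on several file-scope helpers that are not part of this excerpt (output_file, hw_device_ctx, hw_pix_fmt, get_hw_format, hw_decoder_init and decode_write). A minimal sketch of the two hardware-setup pieces, in the spirit of FFmpeg's hw_decode.c example and assuming those same globals:

/* Globals assumed by main() above (as in FFmpeg's hw_decode.c example). */
static AVBufferRef *hw_device_ctx = NULL;
static enum AVPixelFormat hw_pix_fmt;
static FILE *output_file = NULL;

/* get_format callback: pick the hardware pixel format chosen in main(). */
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }

    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

/* Create the hardware device and attach it to the decoder context. */
static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
{
    int err = 0;

    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create specified HW device.\n");
        return err;
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    return err;
}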
Example #2
static AVCodecContext *wrapped_avcodec_get_context(uint32_t cAVIdx, AVStream *stream)
{
#if (LIBAVFORMAT_VERSION_MAJOR > 57) || ((LIBAVFORMAT_VERSION_MAJOR == 57) && (LIBAVFORMAT_VERSION_MINOR > 32))
    AVCodecContext *avCodecCtx = restore_avcodec_context(cAVIdx, stream->id);
    if (!avCodecCtx)
    {
        avCodecCtx = avcodec_alloc_context3(NULL);
        if (!avCodecCtx) 
        {
            ffmpeg_err("context3 alloc for stream %d failed\n", (int)stream->id);
            return NULL;
        }

        if (avcodec_parameters_to_context(avCodecCtx, stream->codecpar) < 0)
        {
            ffmpeg_err("parameters to context for stream %d failed\n", (int)stream->id);
            avcodec_free_context(&avCodecCtx);
            return NULL;
        }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
        av_codec_set_pkt_timebase(avCodecCtx, stream->time_base);
#else
        avCodecCtx->pkt_timebase = stream->time_base;
#endif
        store_avcodec_context(avCodecCtx, cAVIdx, stream->id);

        return avCodecCtx;
    }

    /* a context was restored from the cache; return it */
    return avCodecCtx;
#else
    return stream->codec;
#endif
}
Example #3
STATUS DemuxerLibAV::selectAudioStream(S32 index_audio) {
	if (!_initialized) {
		log->printf("DemuxerLibAV::selectAudioStream(): demuxer not initialized!\n");
		return S_FAIL;
	}

	S32 count_audio = 0;
	for (U32 i = 0; i < _afc->nb_streams; i++) {
		AVStream *stream = _afc->streams[i];
		AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
		if (codec == NULL) {
			log->printf("DemuxerLibAV::selectAudioStream(): avcodec_find_decoder failed!\n");
			return S_FAIL;
		}
		AVCodecContext *cc = avcodec_alloc_context3(codec);
		if (cc == NULL) {
			log->printf("DemuxerLibAV::selectAudioStream(): avcodec_alloc_context3 failed!\n");
			return S_FAIL;
		}
		if (avcodec_parameters_to_context(cc, stream->codecpar) < 0) {
			log->printf("DemuxerLibAV::selectAudioStream(): avcodec_parameters_to_context failed!\n");
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		if (cc->codec_type == AVMEDIA_TYPE_AUDIO) {
			if (count_audio++ == index_audio || index_audio == -1) {
				_audioStream = _afc->streams[i];
				avcodec_free_context(&cc);
				return S_OK;
			}
		}
		/* the context was only needed to inspect the codec type; avoid leaking it */
		avcodec_free_context(&cc);
	}

	return S_FAIL;
}
Example #4
AVCodecContext* movie_player::get_codec_context_for_stream(AVCodec* codec, AVStream* stream) const
{
#if (defined(CORSIX_TH_USE_LIBAV) && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 14, 0)) || \
    (defined(CORSIX_TH_USE_FFMPEG) && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 33, 100))
    AVCodecContext* ctx = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(ctx, stream->codecpar);
    return ctx;
#else
    return stream->codec;
#endif
}
Example #5
InputContext * input_context_new (const char * filename) {
    int ok = 0;

    // open file
    AVFormatContext * pfc = NULL;
    ok = avformat_open_input(&pfc, filename, NULL, NULL);
    if (ok != 0) {
        goto failed;
    }

    // find stream
    int stnb = av_find_best_stream(pfc, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (stnb < 0) {
        goto close_demuxer;
    }
    AVStream * pst = pfc->streams[stnb];
    AVRational * ptb = &pst->time_base;
    // stream duration rescaled from the stream time base to seconds
    int64_t duration = av_rescale(pst->duration, ptb->num, ptb->den);

    // find codec
    AVCodec * pc = avcodec_find_decoder(pst->codecpar->codec_id);
    if (!pc) {
        goto close_demuxer;
    }
    AVCodecContext * pcc = avcodec_alloc_context3(pc);
    if (!pcc) {
        goto close_demuxer;
    }
    ok = avcodec_parameters_to_context(pcc, pst->codecpar);
    if (ok < 0) {
        goto close_decoder;
    }
    ok = avcodec_open2(pcc, pc, NULL);
    if (ok != 0) {
        goto close_decoder;
    }

    InputContext * context = malloc(sizeof(InputContext));
    if (!context) {
        goto close_decoder;
    }
    context->format_context = pfc;
    context->stream = pst;
    context->codec = pc;
    context->codec_context = pcc;
    context->stream_index = stnb;
    context->time_base = ptb;
    context->duration = duration;

    return context;

close_decoder:
    avcodec_free_context(&pcc);
close_demuxer:
    avformat_close_input(&pfc);
failed:
    return NULL;
}
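The InputContext type itself is not shown in this example. A hypothetical definition, inferred only from the fields assigned above:

/* Hypothetical layout inferred from the assignments in input_context_new(). */
typedef struct InputContext {
    AVFormatContext *format_context;
    AVStream        *stream;
    AVCodec         *codec;
    AVCodecContext  *codec_context;
    int              stream_index;
    AVRational      *time_base;
    int64_t          duration;   /* stream duration rescaled to seconds */
} InputContext;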
Example #6
static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *decoder = NULL;
    AVStream *video = NULL;

    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
                filename, av_err2str(ret));
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }

    ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file. "
                "Error code: %s\n", av_err2str(ret));
        return ret;
    }
    video_stream = ret;

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = ifmt_ctx->streams[video_stream];
    if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
        fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }

    decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    if (!decoder_ctx->hw_device_ctx) {
        fprintf(stderr, "A hardware device reference create failed.\n");
        return AVERROR(ENOMEM);
    }
    decoder_ctx->get_format    = get_vaapi_format;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
        fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
                av_err2str(ret));

    return ret;
}
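The get_vaapi_format callback assigned above is not included in this snippet. A minimal sketch that simply selects the VA-API pixel format, as FFmpeg's vaapi_transcode.c example does:

/* get_format callback: accept only the VA-API hardware pixel format. */
static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
                                           const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    }

    fprintf(stderr, "Unable to decode this file using VA-API.\n");
    return AV_PIX_FMT_NONE;
}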
Example #7
/**
 * Opens the given stream, i.e. sets up a decoder.
 *
 * @param env JNIEnv
 * @param stream AVStream
 */
int ff_open_stream(JNIEnv *env, AVStream *stream, AVCodecContext **context) {
#ifdef DEBUG
    fprintf(stderr, "Opening stream...\n");
#endif

    int res = 0;
    AVCodec *decoder = NULL;
    AVDictionary *opts = NULL;
    int refcount = 0; // is this correct?

    decoder = avcodec_find_decoder(stream->codecpar->codec_id);
    if (!decoder) {
        fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to find codec.");
        goto bail;
    }

    *context = avcodec_alloc_context3(decoder);
    if (!*context) {
        fprintf(stderr, "Failed to allocate context\n");
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to allocate codec context.");
        goto bail;
    }

    /* Copy codec parameters from input stream to output codec context */
    if ((res = avcodec_parameters_to_context(*context, stream->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to copy codec parameters.");
        goto bail;
    }

    /* Init the decoders, with or without reference counting */
    av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
    if ((res = avcodec_open2(*context, decoder, &opts)) < 0) {
        fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open codec.");
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Stream was opened.\n");
#endif
    return res;

bail:
    return res;
}
Example #8
// Set avctx codec headers for decoding. Returns <0 on failure.
int mp_set_avctx_codec_headers(AVCodecContext *avctx, struct mp_codec_params *c)
{
    enum AVMediaType codec_type = avctx->codec_type;
    enum AVCodecID codec_id = avctx->codec_id;
    AVCodecParameters *avp = mp_codec_params_to_av(c);
    if (!avp)
        return -1;

    int r = avcodec_parameters_to_context(avctx, avp) < 0 ? -1 : 0;
    avcodec_parameters_free(&avp);

    if (avctx->codec_type != AVMEDIA_TYPE_UNKNOWN)
        avctx->codec_type = codec_type;
    if (avctx->codec_id != AV_CODEC_ID_NONE)
        avctx->codec_id = codec_id;
    return r;
}
static int remove_extradata_init(AVBSFContext *ctx)
{
    RemoveExtradataContext *s = ctx->priv_data;
    int ret;

    s->parser = av_parser_init(ctx->par_in->codec_id);

    if (s->parser) {
        s->avctx = avcodec_alloc_context3(NULL);
        if (!s->avctx)
            return AVERROR(ENOMEM);

        ret = avcodec_parameters_to_context(s->avctx, ctx->par_in);
        if (ret < 0)
            return ret;
    }

    return 0;
}
Example #10
static int mp_open_codec(struct mp_decode *d)
{
	AVCodecContext *c;
	int ret;

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	c = avcodec_alloc_context3(d->codec);
	if (!c) {
		blog(LOG_WARNING, "MP: Failed to allocate context");
		return -1;
	}

	ret = avcodec_parameters_to_context(c, d->stream->codecpar);
	if (ret < 0)
		goto fail;
#else
	c = d->stream->codec;
#endif

	if (c->thread_count == 1 &&
	    c->codec_id != AV_CODEC_ID_PNG &&
	    c->codec_id != AV_CODEC_ID_TIFF &&
	    c->codec_id != AV_CODEC_ID_JPEG2000 &&
	    c->codec_id != AV_CODEC_ID_MPEG4 &&
	    c->codec_id != AV_CODEC_ID_WEBP)
		c->thread_count = 0;

	ret = avcodec_open2(c, d->codec, NULL);
	if (ret < 0)
		goto fail;

	d->decoder = c;
	return ret;

fail:
	avcodec_close(c);
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	/* d->decoder has not been assigned on this path; free the local context */
	av_free(c);
#endif
	return ret;
}
Example #11
static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the audio stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
        return ret;
    }
    audio_stream_index = ret;

    /* create decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);

    /* init the audio decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return ret;
    }

    return 0;
}
Example #12
int AudioWorks::init(const char* filename)
{
	int ret = 0;
    av_register_all();

	/*open formatctx*/
	ret = avformat_open_input(&formatCtx, filename, NULL, NULL);
	if(ret < 0){
		std::cout<<"avformat open failed!"<<std::endl;
		return ret;
	}

	ret = avformat_find_stream_info(formatCtx, NULL); 
	if(ret < 0){
		std::cout<<"find stream info failed!"<<std::endl;	
		return ret;
	}
	
	/*find audio stream index*/
	int audioStreamIndex = av_find_best_stream(formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 
	if(audioStreamIndex < 0){
		std::cout<<"can't find audio stream"<<std::endl;	
		return audioStreamIndex;
	}
	audioIndex = audioStreamIndex;
    /*alloc audioCodecCtx and open it*/
    audioCodecCtx = avcodec_alloc_context3(NULL);
    if(audioCodecCtx == NULL){
		std::cout<<"alloc AVCodecContext failed!"<<std::endl;
		return -1;
	}
    ret = avcodec_parameters_to_context(audioCodecCtx, formatCtx->streams[audioStreamIndex]->codecpar);
	/*find the decode type*/
    AVCodec* codec = avcodec_find_decoder(audioCodecCtx->codec_id);
	if(codec == NULL){
		std::cout<<"find decoder failed!"<<std::endl;
		return -1;
	}
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "refcounted_frames", "1", 0);

    ret = avcodec_open2(audioCodecCtx, codec, &opts);
	if(ret < 0){
        std::cout<<"open audioCodecCtx failed!"<<std::endl;
		return -1;
	}

    /*find video stream index*/
    int videoStreamIndex = av_find_best_stream(formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if(videoStreamIndex < 0){
        std::cout<<"can't find video stream"<<std::endl;
        return videoStreamIndex;
    }
    videoIndex = videoStreamIndex;
    /*alloc videoCodecCtx and open it*/
    videoCodecCtx = avcodec_alloc_context3(NULL);
    if(videoCodecCtx == NULL){
        std::cout<<"alloc AVCodecContext failed!"<<std::endl;
        return -1;
    }
    ret = avcodec_parameters_to_context(videoCodecCtx, formatCtx->streams[videoStreamIndex]->codecpar);
    /*find the decode type*/
    AVCodec* vcodec = avcodec_find_decoder(videoCodecCtx->codec_id);
    if(vcodec == NULL){
        std::cout<<"find decoder failed!"<<std::endl;
        return -1;
    }
    AVDictionary *vopts = NULL;
    av_dict_set(&vopts, "refcounted_frames", "1", 0);

    ret = avcodec_open2(videoCodecCtx, vcodec, &vopts);
    if(ret < 0){
        std::cout<<"open videoCodecCtx failed!"<<std::endl;
        return -1;
    }
    return 0;
}
Example #13
bool FFMpegLoader::open(qint64 &position) {
	if (!AbstractFFMpegLoader::open(position)) {
		return false;
	}

	int res = 0;
	char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };

	auto codecParams = fmtContext->streams[streamId]->codecpar;

	codecContext = avcodec_alloc_context3(nullptr);
	if (!codecContext) {
		LOG(("Audio Error: Unable to avcodec_alloc_context3 for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
		return false;
	}
	if ((res = avcodec_parameters_to_context(codecContext, codecParams)) < 0) {
		LOG(("Audio Error: Unable to avcodec_parameters_to_context for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		return false;
	}
	av_codec_set_pkt_timebase(codecContext, fmtContext->streams[streamId]->time_base);
	av_opt_set_int(codecContext, "refcounted_frames", 1, 0);

	if ((res = avcodec_open2(codecContext, codec, 0)) < 0) {
		LOG(("Audio Error: Unable to avcodec_open2 for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		return false;
	}

	auto layout = codecParams->channel_layout;
	if (!layout) {
		auto channelsCount = codecParams->channels;
		switch (channelsCount) {
		case 1: layout = AV_CH_LAYOUT_MONO; break;
		case 2: layout = AV_CH_LAYOUT_STEREO; break;
		default: LOG(("Audio Error: Unknown channel layout for %1 channels.").arg(channelsCount)); break;
		}
	}
	inputFormat = codecContext->sample_fmt;
	switch (layout) {
	case AV_CH_LAYOUT_MONO:
		switch (inputFormat) {
		case AV_SAMPLE_FMT_U8:
		case AV_SAMPLE_FMT_U8P: fmt = AL_FORMAT_MONO8; sampleSize = 1; break;
		case AV_SAMPLE_FMT_S16:
		case AV_SAMPLE_FMT_S16P: fmt = AL_FORMAT_MONO16; sampleSize = sizeof(uint16); break;
		default:
			sampleSize = -1; // convert needed
		break;
		}
	break;
	case AV_CH_LAYOUT_STEREO:
		switch (inputFormat) {
		case AV_SAMPLE_FMT_U8: fmt = AL_FORMAT_STEREO8; sampleSize = 2; break;
		case AV_SAMPLE_FMT_S16: fmt = AL_FORMAT_STEREO16; sampleSize = 2 * sizeof(uint16); break;
		default:
			sampleSize = -1; // convert needed
		break;
		}
	break;
	default:
		sampleSize = -1; // convert needed
	break;
	}
	if (_samplesFrequency != 44100 && _samplesFrequency != 48000) {
		sampleSize = -1; // convert needed
	}

	if (sampleSize < 0) {
		swrContext = swr_alloc();
		if (!swrContext) {
			LOG(("Audio Error: Unable to swr_alloc for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
			return false;
		}
		int64_t src_ch_layout = layout, dst_ch_layout = AudioToChannelLayout;
		srcRate = _samplesFrequency;
		AVSampleFormat src_sample_fmt = inputFormat, dst_sample_fmt = AudioToFormat;
		dstRate = (_samplesFrequency != 44100 && _samplesFrequency != 48000) ? Media::Player::kDefaultFrequency : _samplesFrequency;

		av_opt_set_int(swrContext, "in_channel_layout", src_ch_layout, 0);
		av_opt_set_int(swrContext, "in_sample_rate", srcRate, 0);
		av_opt_set_sample_fmt(swrContext, "in_sample_fmt", src_sample_fmt, 0);
		av_opt_set_int(swrContext, "out_channel_layout", dst_ch_layout, 0);
		av_opt_set_int(swrContext, "out_sample_rate", dstRate, 0);
		av_opt_set_sample_fmt(swrContext, "out_sample_fmt", dst_sample_fmt, 0);

		if ((res = swr_init(swrContext)) < 0) {
			LOG(("Audio Error: Unable to swr_init for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
			return false;
		}

		sampleSize = AudioToChannels * sizeof(short);
		_samplesFrequency = dstRate;
		_samplesCount = av_rescale_rnd(_samplesCount, dstRate, srcRate, AV_ROUND_UP);
		position = av_rescale_rnd(position, dstRate, srcRate, AV_ROUND_DOWN);
		fmt = AL_FORMAT_STEREO16;

		maxResampleSamples = av_rescale_rnd(AVBlockSize / sampleSize, dstRate, srcRate, AV_ROUND_UP);
		if ((res = av_samples_alloc_array_and_samples(&dstSamplesData, 0, AudioToChannels, maxResampleSamples, AudioToFormat, 0)) < 0) {
			LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
			return false;
		}
	}
	if (position) {
		int64 ts = (position * fmtContext->streams[streamId]->time_base.den) / (_samplesFrequency * fmtContext->streams[streamId]->time_base.num);
		if (av_seek_frame(fmtContext, streamId, ts, AVSEEK_FLAG_ANY) < 0) {
			if (av_seek_frame(fmtContext, streamId, ts, 0) < 0) {
			}
		}
	}

	return true;
}
Example #14
bool CFFmpegImage::Initialize(unsigned char* buffer, size_t bufSize)
{
  int bufferSize = 4096;
  uint8_t* fbuffer = (uint8_t*)av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
  if (!fbuffer)
  {
    CLog::LogF(LOGERROR, "Could not allocate buffer");
    return false;
  }
  m_buf.data = buffer;
  m_buf.size = bufSize;
  m_buf.pos = 0;

  m_ioctx = avio_alloc_context(fbuffer, bufferSize, 0, &m_buf,
    mem_file_read, NULL, mem_file_seek);

  if (!m_ioctx)
  {
    av_free(fbuffer);
    CLog::LogF(LOGERROR, "Could not allocate AVIOContext");
    return false;
  }

  // signal to ffmpeg that this is not a streaming protocol
  m_ioctx->max_packet_size = bufferSize;

  m_fctx = avformat_alloc_context();
  if (!m_fctx)
  {
    FreeIOCtx(&m_ioctx);
    CLog::LogF(LOGERROR, "Could not allocate AVFormatContext");
    return false;
  }

  m_fctx->pb = m_ioctx;

  // Some clients have PNGs saved as JPEG, or ask us for PNG but send JPEG.
  // MythTV throws all mimetypes away and asks us with application/octet-stream;
  // this is a poor man's fallback to at least identify PNG / JPEG.
  bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
  bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
  bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

  AVInputFormat* inp = nullptr;
  if (is_jpeg)
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/apng")
    inp = av_find_input_format("apng");
  else if (is_png)
    inp = av_find_input_format("png_pipe");
  else if (is_tiff)
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/jp2")
    inp = av_find_input_format("j2k_pipe");
  else if (m_strMimeType == "image/webp")
    inp = av_find_input_format("webp_pipe");
  // brute force parse if above check already failed
  else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/png")
    inp = av_find_input_format("png_pipe");
  else if (m_strMimeType == "image/tiff")
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/gif")
    inp = av_find_input_format("gif");

  if (avformat_open_input(&m_fctx, NULL, inp, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  if (m_fctx->nb_streams <= 0)
  {
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }
  AVCodecParameters* codec_params = m_fctx->streams[0]->codecpar;
  AVCodec* codec = avcodec_find_decoder(codec_params->codec_id);
  m_codec_ctx = avcodec_alloc_context3(codec);
  if (!m_codec_ctx)
  {
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  if (avcodec_parameters_to_context(m_codec_ctx, codec_params) < 0)
  {
    avformat_close_input(&m_fctx);
    avcodec_free_context(&m_codec_ctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  if (avcodec_open2(m_codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&m_fctx);
    avcodec_free_context(&m_codec_ctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  return true;
}
Example #15
/**
 * Open an input file and the required decoder.
 * @param      filename             File to be opened
 * @param[out] input_format_context Format context of opened file
 * @param[out] input_codec_context  Codec context of opened file
 * @return Error code (0 if successful)
 */
int Transcode::open_input_file(const char *filename,
                           AVFormatContext **input_format_context,
                           AVCodecContext **input_codec_context)
{
    AVCodecContext *avctx;
    AVCodec *input_codec;
    int error;

    /* Open the input file to read from it. */
    if ((error = avformat_open_input(input_format_context, filename, NULL,
                                     NULL)) < 0) {
        
        fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
                filename, av_cplus_err2str(error));
        *input_format_context = NULL;
        return error;
    }

    /* Get information on the input file (number of streams etc.). */
    if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
        fprintf(stderr, "Could not open find stream info (error '%s')\n",
                av_cplus_err2str(error));
        avformat_close_input(input_format_context);
        return error;
    }

    /* Make sure that there is only one stream in the input file. */
    if ((*input_format_context)->nb_streams != 1) {
        fprintf(stderr, "Expected one audio input stream, but found %d\n",
                (*input_format_context)->nb_streams);
        avformat_close_input(input_format_context);
        return AVERROR_EXIT;
    }

    /* Find a decoder for the audio stream. */
    if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
        fprintf(stderr, "Could not find input codec\n");
        avformat_close_input(input_format_context);
        return AVERROR_EXIT;
    }

    /* Allocate a new decoding context. */
    avctx = avcodec_alloc_context3(input_codec);
    if (!avctx) {
        fprintf(stderr, "Could not allocate a decoding context\n");
        avformat_close_input(input_format_context);
        return AVERROR(ENOMEM);
    }

    /* Initialize the stream parameters with demuxer information. */
    error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
    if (error < 0) {
        avformat_close_input(input_format_context);
        avcodec_free_context(&avctx);
        return error;
    }

    /* Open the decoder for the audio stream to use it later. */
    if ((error = avcodec_open2(avctx, input_codec, NULL)) < 0) {
        fprintf(stderr, "Could not open input codec (error '%s')\n",
                av_cplus_err2str(error));
        avcodec_free_context(&avctx);
        avformat_close_input(input_format_context);
        return error;
    }

    /* Save the decoder context for easier access later. */
    *input_codec_context = avctx;

    return 0;
}
Example #16
static int alloc(struct vidsrc_st **stp, const struct vidsrc *vs,
		 struct media_ctx **mctx, struct vidsrc_prm *prm,
		 const struct vidsz *size, const char *fmt,
		 const char *dev, vidsrc_frame_h *frameh,
		 vidsrc_error_h *errorh, void *arg)
{
#if LIBAVFORMAT_VERSION_INT < ((52<<16) + (110<<8) + 0)
	AVFormatParameters prms;
#endif
	struct vidsrc_st *st;
	bool found_stream = false;
	uint32_t i;
	int ret, err = 0;
	int input_fps = 0;

	(void)mctx;
	(void)errorh;

	if (!stp || !vs || !prm || !size || !frameh)
		return EINVAL;

	st = mem_zalloc(sizeof(*st), destructor);
	if (!st)
		return ENOMEM;

	st->vs     = vs;
	st->sz     = *size;
	st->frameh = frameh;
	st->arg    = arg;
	st->fps    = prm->fps;

	/*
	 * avformat_open_input() was added in lavf 53.2.0 according to
	 * ffmpeg/doc/APIchanges
	 */

#if LIBAVFORMAT_VERSION_INT >= ((52<<16) + (110<<8) + 0)
	(void)fmt;
	ret = avformat_open_input(&st->ic, dev, NULL, NULL);
#else

	/* Params */
	memset(&prms, 0, sizeof(prms));

	prms.time_base          = (AVRational){1, prm->fps};
	prms.channels           = 1;
	prms.width              = size->w;
	prms.height             = size->h;
	prms.pix_fmt            = AV_PIX_FMT_YUV420P;
	prms.channel            = 0;

	ret = av_open_input_file(&st->ic, dev, av_find_input_format(fmt),
				 0, &prms);
#endif

	if (ret < 0) {
		err = ENOENT;
		goto out;
	}

#if LIBAVFORMAT_VERSION_INT >= ((53<<16) + (4<<8) + 0)
	ret = avformat_find_stream_info(st->ic, NULL);
#else
	ret = av_find_stream_info(st->ic);
#endif

	if (ret < 0) {
		warning("avformat: %s: no stream info\n", dev);
		err = ENOENT;
		goto out;
	}

#if 0
	dump_format(st->ic, 0, dev, 0);
#endif

	for (i=0; i<st->ic->nb_streams; i++) {
		const struct AVStream *strm = st->ic->streams[i];
		AVCodecContext *ctx;
		double dfps;

#if LIBAVFORMAT_VERSION_INT >= ((57<<16) + (33<<8) + 100)

		ctx = avcodec_alloc_context3(NULL);
		if (!ctx) {
			err = ENOMEM;
			goto out;
		}

		ret = avcodec_parameters_to_context(ctx, strm->codecpar);
		if (ret < 0) {
			warning("avformat: avcodec_parameters_to_context\n");
			err = EPROTO;
			goto out;
		}

#else
		ctx = strm->codec;
#endif

		if (ctx->codec_type != AVMEDIA_TYPE_VIDEO) {
#if LIBAVFORMAT_VERSION_INT >= ((57<<16) + (33<<8) + 100)
			/* the context allocated above is not needed for non-video streams */
			avcodec_free_context(&ctx);
#endif
			continue;
		}

		debug("avformat: stream %u:  %u x %u "
		      "  time_base=%d/%d\n",
		      i, ctx->width, ctx->height,
		      strm->time_base.num, strm->time_base.den);

		st->sz.w   = ctx->width;
		st->sz.h   = ctx->height;
		st->ctx    = ctx;
		st->sindex = strm->index;
		st->time_base = strm->time_base;

		dfps = av_q2d(strm->avg_frame_rate);
		input_fps = (int)dfps;
		if (st->fps != input_fps) {
			info("avformat: updating %i fps from config to native "
				"input material fps %i\n", st->fps, input_fps);
			st->fps = input_fps;
#if LIBAVFORMAT_VERSION_INT < ((52<<16) + (110<<8) + 0)
			prms.time_base = (AVRational){1, st->fps};
#endif
		}

		if (ctx->codec_id != AV_CODEC_ID_NONE) {

			st->codec = avcodec_find_decoder(ctx->codec_id);
			if (!st->codec) {
				err = ENOENT;
				goto out;
			}

#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
			ret = avcodec_open2(ctx, st->codec, NULL);
#else
			ret = avcodec_open(ctx, st->codec);
#endif
			if (ret < 0) {
				err = ENOENT;
				goto out;
			}
		}

		found_stream = true;
		break;
	}

	if (!found_stream) {
		err = ENOENT;
		goto out;
	}

	st->run = true;
	err = pthread_create(&st->thread, NULL, read_thread, st);
	if (err) {
		st->run = false;
		goto out;
	}

 out:
	if (err)
		mem_deref(st);
	else
		*stp = st;

	return err;
}
Example #17
static int open_input_file(InputFile *ifile, const char *filename)
{
    int err, i;
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *t;

    if ((err = avformat_open_input(&fmt_ctx, filename,
                                   iformat, &format_opts)) < 0) {
        print_error(filename, err);
        return err;
    }
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        return AVERROR_OPTION_NOT_FOUND;
    }


    /* fill the streams in the format context */
    if ((err = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        print_error(filename, err);
        return err;
    }

    av_dump_format(fmt_ctx, 0, filename, 0);

    ifile->streams = av_mallocz_array(fmt_ctx->nb_streams,
                                      sizeof(*ifile->streams));
    if (!ifile->streams)
        exit(1);
    ifile->nb_streams = fmt_ctx->nb_streams;

    /* bind a decoder to each input stream */
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        InputStream *ist = &ifile->streams[i];
        AVStream *stream = fmt_ctx->streams[i];
        AVCodec *codec;

        ist->st = stream;

        if (stream->codecpar->codec_id == AV_CODEC_ID_PROBE) {
            fprintf(stderr, "Failed to probe codec for input stream %d\n",
                    stream->index);
            continue;
        }

        codec = avcodec_find_decoder(stream->codecpar->codec_id);
        if (!codec) {
            fprintf(stderr,
                    "Unsupported codec with id %d for input stream %d\n",
                    stream->codecpar->codec_id, stream->index);
            continue;
        }

        ist->dec_ctx = avcodec_alloc_context3(codec);
        if (!ist->dec_ctx)
            exit(1);

        err = avcodec_parameters_to_context(ist->dec_ctx, stream->codecpar);
        if (err < 0)
            exit(1);

        err = avcodec_open2(ist->dec_ctx, NULL, NULL);
        if (err < 0) {
            fprintf(stderr, "Error while opening codec for input stream %d\n",
                    stream->index);
            exit(1);

        }
    }

    ifile->fmt_ctx = fmt_ctx;
    return 0;
}
Example #18
int fe_decode_open(char *filename) {
    int i = -1;
    AVDictionary *l_iFormatOpts = NULL;

    printf("fe_decode_open: Decode audio file %s\n",
           filename);

    m_pFormatCtx = avformat_alloc_context();

// Enable this to use old slow MP3 Xing TOC
#ifndef CODEC_ID_MP3

    if ( LIBAVFORMAT_VERSION_INT > 3540580 ) {
        printf("fe_decode_open: Set usetoc to have old way of XING TOC reading (libavformat version: '%d')\n", LIBAVFORMAT_VERSION_INT);
        av_dict_set(&l_iFormatOpts, "usetoc", "0", 0);
    }

#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, filename, NULL, &l_iFormatOpts) != 0) {
        printf("fe_decode_open: cannot open with 'avformat_open_input': %s\n",
               filename);
        return -1;
    }

#ifndef CODEC_ID_MP3

    if ( LIBAVFORMAT_VERSION_INT > 3540580 && l_iFormatOpts != NULL ) {
        av_dict_free(&l_iFormatOpts);
    }

#endif

#if LIBAVCODEC_VERSION_INT < 3622144
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0) {
        printf("fe_decode_open: cannot open find info '%s'\n",
               filename);
        printf("As documentation says this is pretty normal. So this not show stopper!\n");
    }

    av_dump_format(m_pFormatCtx, 0, filename, 0);

    // Find the first audio stream
    m_iAudioStream = -1;

    printf("fe_decode_open: File got streams: %d\n", m_pFormatCtx->nb_streams);

    for (i = 0; i < m_pFormatCtx->nb_streams; i++) {
// If we have FFMpeg version which is less than 3.2 then we use older implementation
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)
        if (m_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
#else

        if (m_pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
#endif
            m_iAudioStream = i;
            break;
        }
    }

    if (m_iAudioStream == -1) {
        printf("fe_decode_open: cannot find an audio stream: cannot open %s",
               filename);
        return -1;
    }

    // Find the decoder for the audio stream
    // If we have FFMpeg version which is less than 3.2 then we use older implementation
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)

    if (!(m_pCodec = avcodec_find_decoder(m_pFormatCtx->streams[m_iAudioStream]->codec->codec_id))) {
#else

    if (!(m_pCodec = avcodec_find_decoder(m_pFormatCtx->streams[m_iAudioStream]->codecpar->codec_id))) {
#endif
        printf("fe_decode_open: cannot find a decoder for %s\n",
               filename);
        return -1;
    }

// If we have FFMpeg version which is less than 3.2 then we use older implementation
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)
    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx = m_pFormatCtx->streams[m_iAudioStream]->codec;

#else
    // Allocate a codec context for the audio stream
    //m_pCodecCtx = avcodec_alloc_context3(m_pCodec);
    m_pCodecCtx = avcodec_alloc_context3(NULL);

    // Add stream parameters to context
    if(avcodec_parameters_to_context(m_pCodecCtx, m_pFormatCtx->streams[m_iAudioStream]->codecpar)) {
        printf("fe_decode_open: cannot add Codec parameters: %s\n",
               filename);
        return -1;
    }

// This is not needed anymore above FFMpeg version 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
    // Set the packet timebase correctly
    av_codec_set_pkt_timebase(m_pCodecCtx, m_pFormatCtx->streams[m_iAudioStream]->time_base);
#endif

    // Make sure that the codec IDs are identical, or avcodec_open2 fails.
    m_pCodecCtx->codec_id = m_pCodec->id;
#endif

    if(!m_pCodecCtx) {
        printf("fe_decode_open: cannot get 'AVCodecContext'\n");
        return -1;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0) {
        printf("fe_decode_open: cannot open with 'avcodec_open2' codec_id: %d Audio stream id: %d: %s\n",
               m_pFormatCtx->streams[m_iAudioStream]->codecpar->codec_id,
               m_iAudioStream, filename
              );
        return -1;
    }

    printf("fe_decode_open: PCM Length is: %f (Bytes: %ld)\n",
           (double)(m_pFormatCtx->duration / AV_TIME_BASE),
           (int64_t)round((double)(m_pFormatCtx->duration / AV_TIME_BASE) * (44100 * 4)));
    m_lPcmLength = (int64_t)round((double)(m_pFormatCtx->duration / AV_TIME_BASE) * (44100 * 4));

    return 0;
}
Example #19
static int video_decode_example(const char *input_filename)
{
    AVCodec *codec = NULL;
    AVCodecContext *ctx= NULL;
    AVCodecParameters *origin_par = NULL;
    AVFrame *fr = NULL;
    uint8_t *byte_buffer = NULL;
    AVPacket pkt;
    AVFormatContext *fmt_ctx = NULL;
    int number_of_written_bytes;
    int video_stream;
    int got_frame = 0;
    int byte_buffer_size;
    int i = 0;
    int result;
    int end_of_stream = 0;

    result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't open file\n");
        return result;
    }

    result = avformat_find_stream_info(fmt_ctx, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't get stream info\n");
        return result;
    }

    video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream < 0) {
      av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input file\n");
      return -1;
    }

    origin_par = fmt_ctx->streams[video_stream]->codecpar;

    codec = avcodec_find_decoder(origin_par->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n");
        return -1;
    }

    ctx = avcodec_alloc_context3(codec);
    if (!ctx) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate decoder context\n");
        return AVERROR(ENOMEM);
    }

    result = avcodec_parameters_to_context(ctx, origin_par);
    if (result) {
        av_log(NULL, AV_LOG_ERROR, "Can't copy decoder context\n");
        return result;
    }

    result = avcodec_open2(ctx, codec, NULL);
    if (result < 0) {
        av_log(ctx, AV_LOG_ERROR, "Can't open decoder\n");
        return result;
    }

    fr = av_frame_alloc();
    if (!fr) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate frame\n");
        return AVERROR(ENOMEM);
    }

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    printf("#tb %d: %d/%d\n", video_stream, fmt_ctx->streams[video_stream]->time_base.num, fmt_ctx->streams[video_stream]->time_base.den);
    i = 0;
    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if (pkt.pts == AV_NOPTS_VALUE)
                pkt.pts = pkt.dts = i;
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                        (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                        ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                printf("%d, %10"PRId64", %10"PRId64", %8"PRId64", %8d, 0x%08lx\n", video_stream,
                        fr->pts, fr->pkt_dts, fr->pkt_duration,
                        number_of_written_bytes, av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes));
            }
            av_packet_unref(&pkt);
            av_init_packet(&pkt);
        }
        i++;
    } while (!end_of_stream || got_frame);

    av_packet_unref(&pkt);
    av_frame_free(&fr);
    avcodec_close(ctx);
    avformat_close_input(&fmt_ctx);
    avcodec_free_context(&ctx);
    av_freep(&byte_buffer);
    return 0;
}
Example #20
static av_cold int movie_init(AVFilterContext *ctx)
{
    MovieContext *movie = ctx->priv;
    AVInputFormat *iformat = NULL;
    AVStream *st;
    AVCodec *codec;
    int ret;
    int64_t timestamp;

    av_register_all();

    // Try to find the movie format (container)
    iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;

    movie->format_ctx = NULL;
    if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Failed to avformat_open_input '%s'\n", movie->file_name);
        return ret;
    }
    if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
        av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");

    // if seeking requested, we execute it
    if (movie->seek_point > 0) {
        timestamp = movie->seek_point;
        // add the stream start time, should it exist
        if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
            if (timestamp > INT64_MAX - movie->format_ctx->start_time) {
                av_log(ctx, AV_LOG_ERROR,
                       "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
                       movie->file_name, movie->format_ctx->start_time, movie->seek_point);
                return AVERROR(EINVAL);
            }
            timestamp += movie->format_ctx->start_time;
        }
        if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
            av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
                   movie->file_name, timestamp);
            return ret;
        }
    }

    /* select the video stream */
    if ((ret = av_find_best_stream(movie->format_ctx, AVMEDIA_TYPE_VIDEO,
                                   movie->stream_index, -1, NULL, 0)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "No video stream with index '%d' found\n",
               movie->stream_index);
        return ret;
    }
    movie->stream_index = ret;
    st = movie->format_ctx->streams[movie->stream_index];

    /*
     * So now we've got a pointer to the so-called codec context for our video
     * stream, but we still have to find the actual codec and open it.
     */
    codec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!codec) {
        av_log(ctx, AV_LOG_ERROR, "Failed to find any codec\n");
        return AVERROR(EINVAL);
    }

    movie->codec_ctx = avcodec_alloc_context3(codec);
    if (!movie->codec_ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_to_context(movie->codec_ctx, st->codecpar);
    if (ret < 0)
        return ret;

    movie->codec_ctx->refcounted_frames = 1;

    if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n");
        return ret;
    }

    movie->w = movie->codec_ctx->width;
    movie->h = movie->codec_ctx->height;

    av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
           movie->seek_point, movie->format_name, movie->file_name,
           movie->stream_index);

    return 0;
}
Example #21
avcodec_decoder avcodec_decoder_create(const opencv_mat buf) {
    avcodec_decoder d = new struct avcodec_decoder_struct();
    memset(d, 0, sizeof(struct avcodec_decoder_struct));
    d->mat = static_cast<const cv::Mat *>(buf);

    d->container = avformat_alloc_context();
    if (!d->container) {
        avcodec_decoder_release(d);
        return NULL;
    }

    d->avio = avio_alloc_context(NULL, 0, 0, d, avcodec_decoder_read_callback,
                                 NULL, avcodec_decoder_seek_callback);
    if (!d->avio) {
        avcodec_decoder_release(d);
        return NULL;
    }
    d->container->pb = d->avio;

    int res = avformat_open_input(&d->container, NULL, NULL, NULL);
    if (res < 0) {
        avformat_free_context(d->container);
        d->container = NULL;
        avcodec_decoder_release(d);
        return NULL;
    }

    res = avformat_find_stream_info(d->container, NULL);
    if (res < 0) {
        avcodec_decoder_release(d);
        return NULL;
    }

    if (avcodec_decoder_is_audio(d)) {
        // in this case, quit out fast since we won't be decoding this anyway
        // (audio is metadata-only)
        return d;
    }

    AVCodecParameters *codec_params = NULL;
    for (int i = 0; i < d->container->nb_streams; i++) {
        if (d->container->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            codec_params = d->container->streams[i]->codecpar;
            d->video_stream_index = i;
            break;
        }
    }
    if (!codec_params) {
        avcodec_decoder_release(d);
        return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(codec_params->codec_id);
    if (!codec) {
        avcodec_decoder_release(d);
        return NULL;
    }

    d->codec = avcodec_alloc_context3(codec);
    if (!d->codec) {
        avcodec_decoder_release(d);
        return NULL;
    }

    res = avcodec_parameters_to_context(d->codec, codec_params);
    if (res < 0) {
        avcodec_decoder_release(d);
        return NULL;
    }

    res = avcodec_open2(d->codec, codec, NULL);
    if (res < 0) {
        avcodec_decoder_release(d);
        return NULL;
    }

    return d;
}
Example #22
/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i,
                               int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    char *separator = ic->dump_separator;
    AVCodecContext *avctx;
    int ret;

    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return;

    ret = avcodec_parameters_to_context(avctx, st->codecpar);
    if (ret < 0) {
        avcodec_free_context(&avctx);
        return;
    }

    // Fields which are missing from AVCodecParameters need to be taken from the AVCodecContext
    avctx->properties = st->codec->properties;
    avctx->codec      = st->codec->codec;
    avctx->qmin       = st->codec->qmin;
    avctx->qmax       = st->codec->qmax;
    avctx->coded_width  = st->codec->coded_width;
    avctx->coded_height = st->codec->coded_height;

    if (separator)
        av_opt_set(avctx, "dump_separator", separator, 0);
    avcodec_string(buf, sizeof(buf), avctx, is_output);
    avcodec_free_context(&avctx);

    av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);

    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
           st->time_base.num, st->time_base.den);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);

    if (st->sample_aspect_ratio.num &&
        av_cmp_q(st->sample_aspect_ratio, st->codecpar->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codecpar->width  * (int64_t)st->sample_aspect_ratio.num,
                  st->codecpar->height * (int64_t)st->sample_aspect_ratio.den,
                  1024 * 1024);
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
        int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
        int tbn = st->time_base.den && st->time_base.num;
        int tbc = st->codec->time_base.den && st->codec->time_base.num;

        if (fps || tbr || tbn || tbc)
            av_log(NULL, AV_LOG_INFO, "%s", separator);

        if (fps)
            print_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
        if (tbr)
            print_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
        if (tbn)
            print_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
        if (tbc)
            print_fps(1 / av_q2d(st->codec->time_base), "tbc");
    }

    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    if (st->disposition & AV_DISPOSITION_DESCRIPTIONS)
        av_log(NULL, AV_LOG_INFO, " (descriptions)");
    if (st->disposition & AV_DISPOSITION_DEPENDENT)
        av_log(NULL, AV_LOG_INFO, " (dependent)");
    av_log(NULL, AV_LOG_INFO, "\n");

    dump_metadata(NULL, st->metadata, "    ");

    dump_sidedata(NULL, st, "    ");
}
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodecParameters       *pCodecParam = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             send_packet, receive_frame;
    //float           aspect_ratio;
    AVFrame        *pict;
    /*
    std::unique_ptr<AVFrame, std::function<void(AVFrame*)>> frame_converted{
        av_frame_alloc(),
        [](AVFrame* f){ av_free(f->data[0]); } };
    if (av_frame_copy_props(frame_converted.get(),
        frame_decoded.get()) < 0) {
        throw std::runtime_error("Copying frame properties");
    }
    if (av_image_alloc(
        frame_converted->data, frame_converted->linesize,
        video_decoder_->width(), video_decoder_->height(),
        video_decoder_->pixel_format(), 1) < 0) {
        throw std::runtime_error("Allocating picture");
    }
    */
    AVDictionary    *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;

    SDL_Texture*    pTexture = nullptr;
    SDL_Window*     pWindows = nullptr;
    SDL_Renderer*   pRenderer = nullptr;

    SDL_Event       event;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec parameters for the video stream.
    // AVStream::codec is deprecated, so use the codecpar struct (AVCodecParameters) instead.
    pCodecParam = pFormatCtx->streams[videoStream]->codecpar;
    // avcodec_open2() still needs an AVCodecContext, so copy pCodecParam (AVCodecParameters) into pCodecCtx (AVCodecContext).
    pCodec = avcodec_find_decoder(pCodecParam->codec_id);
    // Find the decoder for the video stream
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a screen to put our video
#ifndef __DARWIN__
    pWindows = SDL_CreateWindow(argv[1],SDL_WINDOWPOS_CENTERED,SDL_WINDOWPOS_CENTERED,pCodecParam->width, pCodecParam->height,SDL_WINDOW_BORDERLESS|SDL_WINDOW_RESIZABLE);
#else
    screen = SDL_SetVideoMode(pCodecParam->width, pCodecParam->height, 24, 0);
#endif
    if (!pWindows) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    
    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecParam->width, pCodecParam->height);
    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );
    pict = av_frame_alloc();
    if (pict == nullptr){
        exit(1);
    }
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        (AVPixelFormat)pCodecParam->format, 1) < 0){
        exit(1);
    }


    // Read frames and save first five frames to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            // avcodec_decode_video2 is deprecated; use avcodec_send_packet() and avcodec_receive_frame().
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);

            // Did we get a video frame?
            if (send_packet == SEND_PACKET_SUCCESS && receive_frame == RECEIVE_FRAME_SUCCESS) {
                //SDL_LockYUVOverlay(bmp);
                //SDL_LockTexture(pTexture,NULL,);
                // Convert the image into YUV format that SDL uses
                if (av_frame_copy_props(pFrame,
                    pict) < 0) {
                    exit(1);
                }

                sws_scale
                    (
                    sws_ctx,
                    pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecParam->height,
                    pict->data,
                    pict->linesize
                    );
                
                //SDL_UnlockYUVOverlay(bmp);
                SDL_UpdateYUVTexture(pTexture, NULL, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]);
                SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
                SDL_RenderPresent(pRenderer);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_DestroyRenderer(pRenderer);
            SDL_DestroyTexture(pTexture);
            SDL_DestroyWindow(pWindows);
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the YUV frame
    av_frame_free(&pFrame);
    //free pict
    av_freep(&pict->data[0]);
    av_frame_free(&pict);

    // Close and free the codec context
    avcodec_free_context(&pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
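The loop above issues exactly one avcodec_receive_frame() per packet, so frames the decoder buffers internally (B-frames, delayed output) are silently dropped and AVERROR(EAGAIN) is never distinguished from a real error. A minimal sketch of a complete send/receive drain loop, assuming an already opened AVFormatContext/AVCodecContext and a hypothetical handle_frame() callback standing in for the rendering code:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Hypothetical per-frame callback; replace with your own rendering/saving code. */
static void handle_frame(const AVFrame *frame);

static int decode_all(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx,
                      int video_stream)
{
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    while (av_read_frame(fmt_ctx, &packet) >= 0) {
        if (packet.stream_index == video_stream) {
            ret = avcodec_send_packet(dec_ctx, &packet);
            /* Drain every frame the decoder has ready for this packet. */
            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;          /* needs more input / end of stream */
                else if (ret < 0)
                    goto fail;      /* real decoding error */
                handle_frame(frame);
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }

    /* Flush: send a NULL packet, then drain the remaining delayed frames. */
    avcodec_send_packet(dec_ctx, NULL);
    while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
        handle_frame(frame);
        av_frame_unref(frame);
    }

    av_frame_free(&frame);
    return 0;

fail:
    av_frame_free(&frame);
    return ret;
}

The flush step matters: without the NULL send, decoders with internal delay never emit their last frames.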
Example no. 24
static av_cold int init_subtitles(AVFilterContext *ctx)
{
    int j, ret, sid;
    int k = 0;
    AVDictionary *codec_opts = NULL;
    AVFormatContext *fmt = NULL;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    const AVCodecDescriptor *dec_desc;
    AVStream *st;
    AVPacket pkt;
    AssContext *ass = ctx->priv;

    /* Init libass */
    ret = init(ctx);
    if (ret < 0)
        return ret;
    ass->track = ass_new_track(ass->library);
    if (!ass->track) {
        av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n");
        return AVERROR(EINVAL);
    }

    /* Open subtitles file */
    ret = avformat_open_input(&fmt, ass->filename, NULL, NULL);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename);
        goto end;
    }
    ret = avformat_find_stream_info(fmt, NULL);
    if (ret < 0)
        goto end;

    /* Locate subtitles stream */
    if (ass->stream_index < 0)
        ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
    else {
        ret = -1;
        if (ass->stream_index < fmt->nb_streams) {
            for (j = 0; j < fmt->nb_streams; j++) {
                if (fmt->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    if (ass->stream_index == k) {
                        ret = j;
                        break;
                    }
                    k++;
                }
            }
        }
    }

    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n",
               ass->filename);
        goto end;
    }
    sid = ret;
    st = fmt->streams[sid];

    /* Load attached fonts */
    for (j = 0; j < fmt->nb_streams; j++) {
        AVStream *st = fmt->streams[j];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT &&
            attachment_is_font(st)) {
            const AVDictionaryEntry *tag = NULL;
            tag = av_dict_get(st->metadata, "filename", NULL,
                              AV_DICT_MATCH_CASE);

            if (tag) {
                av_log(ctx, AV_LOG_DEBUG, "Loading attached font: %s\n",
                       tag->value);
                ass_add_font(ass->library, tag->value,
                             st->codecpar->extradata,
                             st->codecpar->extradata_size);
            } else {
                av_log(ctx, AV_LOG_WARNING,
                       "Font attachment has no filename, ignored.\n");
            }
        }
    }

    /* Initialize fonts */
    ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);

    /* Open decoder */
    dec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!dec) {
        av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n",
               avcodec_get_name(st->codecpar->codec_id));
        ret = AVERROR(EINVAL);
        goto end;
    }
    dec_desc = avcodec_descriptor_get(st->codecpar->codec_id);
    if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
        av_log(ctx, AV_LOG_ERROR,
               "Only text based subtitles are currently supported\n");
        ret = AVERROR_PATCHWELCOME;
        goto end;
    }
    if (ass->charenc)
        av_dict_set(&codec_opts, "sub_charenc", ass->charenc, 0);
    if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57,26,100))
        av_dict_set(&codec_opts, "sub_text_format", "ass", 0);

    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = avcodec_parameters_to_context(dec_ctx, st->codecpar);
    if (ret < 0)
        goto end;

    /*
     * This is required by the decoding process in order to rescale the
     * timestamps: in the current API the decoded subtitles have their pts
     * expressed in AV_TIME_BASE, and thus the lavc internals need to know the
     * stream time base in order to achieve the rescaling.
     *
     * That API is old and needs to be reworked to match behaviour with A/V.
     */
    av_codec_set_pkt_timebase(dec_ctx, st->time_base);

    ret = avcodec_open2(dec_ctx, NULL, &codec_opts);
    if (ret < 0)
        goto end;

    if (ass->force_style) {
        char **list = NULL;
        char *temp = NULL;
        char *ptr = av_strtok(ass->force_style, ",", &temp);
        int i = 0;
        while (ptr) {
            av_dynarray_add(&list, &i, ptr);
            if (!list) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            ptr = av_strtok(NULL, ",", &temp);
        }
        av_dynarray_add(&list, &i, NULL);
        if (!list) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        ass_set_style_overrides(ass->library, list);
        av_free(list);
    }
    /* Decode subtitles and push them into the renderer (libass) */
    if (dec_ctx->subtitle_header)
        ass_process_codec_private(ass->track,
                                  dec_ctx->subtitle_header,
                                  dec_ctx->subtitle_header_size);
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    while (av_read_frame(fmt, &pkt) >= 0) {
        int i, got_subtitle;
        AVSubtitle sub = {0};

        if (pkt.stream_index == sid) {
            ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
            if (ret < 0) {
                av_log(ctx, AV_LOG_WARNING, "Error decoding: %s (ignored)\n",
                       av_err2str(ret));
            } else if (got_subtitle) {
                const int64_t start_time = av_rescale_q(sub.pts, AV_TIME_BASE_Q, av_make_q(1, 1000));
                const int64_t duration   = sub.end_display_time;
                for (i = 0; i < sub.num_rects; i++) {
                    char *ass_line = sub.rects[i]->ass;
                    if (!ass_line)
                        break;
                    if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,25,100))
                        ass_process_data(ass->track, ass_line, strlen(ass_line));
                    else
                        ass_process_chunk(ass->track, ass_line, strlen(ass_line),
                                          start_time, duration);
                }
            }
        }
        av_packet_unref(&pkt);
        avsubtitle_free(&sub);
    }

end:
    av_dict_free(&codec_opts);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt);
    return ret;
}
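The timestamp comment above relies on av_codec_set_pkt_timebase(), an accessor from the old libavcodec API that has been removed in recent releases. On current versions the same information is passed through the public AVCodecContext.pkt_timebase field; a minimal sketch of the equivalent, using the same dec_ctx and st as above:

    /* Modern equivalent of av_codec_set_pkt_timebase(dec_ctx, st->time_base):
     * tell the subtitle decoder the stream time base so it can rescale timestamps. */
    dec_ctx->pkt_timebase = st->time_base;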
Example no. 25
bool DWVideo::Init(const char* url)
{

	m_audio_frame = av_frame_alloc();

	if(avformat_open_input(&m_avformat_context,url,NULL,NULL) != 0)
	{
		fprintf(stderr,"Could not open input %s !\n",url);
		return false;
	}

	if(avformat_find_stream_info(m_avformat_context, NULL) < 0)
	{
		return false;
	}

	av_dump_format(m_avformat_context,0,url,0);


	video_stream = -1;
	audio_stream = -1;

	for(int i =0; i < m_avformat_context->nb_streams; i++)
	{
		if(m_avformat_context->streams[i]->
			codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			video_stream = i;
		}else if(m_avformat_context->streams[i]->
			codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			audio_stream = i;
		}
	}

	if(video_stream == -1)
	{
		fprintf(stderr,"Could not find a video stream!\n");
		return false;
	}
	if(audio_stream == -1)
	{
		fprintf(stderr,"Could not find an audio stream!\n");
		return false;
	}
/*
	m_audio_codec_context_orig =
		m_avformat_context->streams[audio_stream]->codecpar;

	m_audio_codec =
		avcodec_find_decoder(m_audio_codec_context_orig->codec_id);

	if(m_audio_codec == NULL)
	{
		fprintf(stderr,"Could not find the audio codec!\n");
		return false;
	}

	m_audio_codec_context = avcodec_alloc_context3(m_audio_codec);
	if(avcodec_parameters_to_context(m_audio_codec_context,m_audio_codec_context_orig) < 0)
	{
		return false;
	}

	if(avcodec_open2(m_audio_codec_context,m_audio_codec,NULL)<0)
	{
		return false;
	}
	
	packet_queue_init(&m_audio_q);
	m_wanted_spec.freq = m_audio_codec_context->sample_rate;
	m_wanted_spec.format = AUDIO_S16SYS;
	m_wanted_spec.channels = m_audio_codec_context->channels;
	m_wanted_spec.silence = 0;
	//buffer size used when SDL asks for more audio data
	m_wanted_spec.samples = 8000;
	m_wanted_spec.callback = AudioCallback;
	m_wanted_spec.userdata = m_audio_codec_context;

	if(SDL_OpenAudio(&m_wanted_spec,&m_spec) < 0)
	{
		fprintf(stderr,"Failed to open the audio device!\n");
		return false;
	}

	SDL_PauseAudio(0);*/
	
	m_codec_context_orig =
		m_avformat_context->streams[video_stream]->codecpar;


	m_codec = avcodec_find_decoder(m_codec_context_orig->codec_id);
	if(m_codec == NULL)
	{
		fprintf(stderr,"Could not find the codec!\n");
		return false;
	}

	m_codec_context = avcodec_alloc_context3(m_codec);
	if(avcodec_parameters_to_context(m_codec_context,m_codec_context_orig) < 0)
	{
		return false;
	}

	if(avcodec_open2(m_codec_context,m_codec,NULL)<0)
	{
		return false;
	}



	m_frame = av_frame_alloc();

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO|SDL_INIT_TIMER))
	{
		fprintf(stderr, "SDL initialization error\n");
		return false;
	}
	
	m_texture = SDL_CreateTexture
		(game::Instance()->getRenderer(),SDL_PIXELFORMAT_IYUV,SDL_TEXTUREACCESS_STREAMING,
		m_codec_context_orig->width,m_codec_context_orig->height);


	delay_time = 1000.0f/FPS;
	start_time = 0;
	return true;
}
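Init() above only prepares the format context, codec context, frame, texture and frame delay; the playback step itself is not part of the listing. A hypothetical per-frame update, assuming the members created in Init() (m_avformat_context, m_codec_context, m_frame, m_texture, video_stream, delay_time) and that the decoder outputs planar YUV420P as the IYUV texture expects:

// Hypothetical companion to DWVideo::Init(): decode and display one frame.
// Returns false when the stream is exhausted.
bool DWVideo::RenderNextFrame()
{
	AVPacket packet;

	while (av_read_frame(m_avformat_context, &packet) >= 0)
	{
		if (packet.stream_index == video_stream &&
			avcodec_send_packet(m_codec_context, &packet) == 0 &&
			avcodec_receive_frame(m_codec_context, m_frame) == 0)
		{
			// upload the decoded planes into the IYUV streaming texture
			SDL_UpdateYUVTexture(m_texture, NULL,
				m_frame->data[0], m_frame->linesize[0],
				m_frame->data[1], m_frame->linesize[1],
				m_frame->data[2], m_frame->linesize[2]);
			SDL_RenderCopy(game::Instance()->getRenderer(), m_texture, NULL, NULL);
			SDL_RenderPresent(game::Instance()->getRenderer());

			av_packet_unref(&packet);
			SDL_Delay((Uint32)delay_time);   // crude pacing at the fixed FPS
			return true;
		}
		av_packet_unref(&packet);
	}
	return false;
}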
Example no. 26
int main(int argc, char* argv[]) {
	printf("Play simple video\n");
	if(argc < 2) {
		printf("Miss input video");
		return -1;
	}
	int ret = -1, i = -1, v_stream_idx = -1;
	char* vf_path = argv[1];
	// fmt_ctx must be initialized to NULL before avformat_open_input()
	AVFormatContext* fmt_ctx = NULL;
	AVCodecContext* codec_ctx = NULL;
	AVCodec* codec;
	AVFrame * frame;
	AVPacket packet;

	av_register_all();
	ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL);
	if(ret < 0){
		printf("Open video file %s failed \n", vf_path);
		goto end;
	}
	if(avformat_find_stream_info(fmt_ctx, NULL)<0)
    	goto end;
    av_dump_format(fmt_ctx, 0, vf_path, 0);
    for(i = 0; i< fmt_ctx->nb_streams; i++) {
    	if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
    		v_stream_idx = i;
    		break;
    	}
    }
    if(v_stream_idx == -1) {
		printf("Cannot find video stream\n");
		goto end;
	}

	codec_ctx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar);
	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if(codec == NULL){
		printf("Unsupported codec for video file\n");
		goto end;
	}
	if(avcodec_open2(codec_ctx, codec, NULL) < 0){
		printf("Can not open codec\n");
		goto end;
	}

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    	printf("Could not init SDL due to %s", SDL_GetError());
    	goto end;
    }
    SDL_Window *window;
    SDL_Renderer *renderer;
    SDL_Texture *texture;
    SDL_Event event;
    SDL_Rect r;
    window = SDL_CreateWindow("SDL_CreateTexture", SDL_WINDOWPOS_UNDEFINED,
    	SDL_WINDOWPOS_UNDEFINED, codec_ctx->width, codec_ctx->height,
    	SDL_WINDOW_RESIZABLE);
    r.x = 0;
    r.y = 0;
    r.w = codec_ctx->width;
    r.h = codec_ctx->height;

    renderer = SDL_CreateRenderer(window, -1, 0);
    // texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET,
    // 	codec_ctx->width, codec_ctx->height);
    texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
    	codec_ctx->width, codec_ctx->height);

    struct SwsContext      *sws_ctx = NULL;
    sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
    	codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    frame = av_frame_alloc();

    int ret1, ret2;
    AVFrame* pict;
    pict = av_frame_alloc();


	int             numBytes;
	uint8_t         *buffer = NULL;
	// av_image_* (from <libavutil/imgutils.h>) replaces the deprecated avpicture_* API
	numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
	                                    codec_ctx->height, 1);
	buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
	// required, or sws_scale gets bad dst image pointers
	av_image_fill_arrays(pict->data, pict->linesize, buffer, AV_PIX_FMT_YUV420P,
	                     codec_ctx->width, codec_ctx->height, 1);
    i = 0;
	while (1) {
        SDL_PollEvent(&event);
        if(event.type == SDL_QUIT)
                break;
        ret = av_read_frame(fmt_ctx, &packet);
        if(ret < 0){
        	// end of stream or read error: stop the playback loop
        	break;
        }
        if(packet.stream_index == v_stream_idx) {
			ret1 = avcodec_send_packet(codec_ctx, &packet);
			ret2 = avcodec_receive_frame(codec_ctx, frame);
			if(ret2 < 0 ){
				// no frame available yet (e.g. AVERROR(EAGAIN)): release the packet first
				av_packet_unref(&packet);
				continue;
	    	}
	    	sws_scale(sws_ctx, (uint8_t const * const *)frame->data,
			      frame->linesize, 0, codec_ctx->height,
			      pict->data, pict->linesize);
	   //  	if(++i <=5 ){
				// save_frame(pict, codec_ctx->width, codec_ctx->height, i);
	   //  	}
	        SDL_UpdateYUVTexture(texture, &r, pict->data[0], pict->linesize[0],
	        	pict->data[1], pict->linesize[1],
	        	pict->data[2], pict->linesize[2]);
	        // SDL_UpdateTexture(texture, &r, pict->data[0], pict->linesize[0]);

	        // r.x=rand()%500;
	        // r.y=rand()%500;

	        // SDL_SetRenderTarget(renderer, texture);
	        // SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0x00);
	        SDL_RenderClear(renderer);
	        // SDL_RenderDrawRect(renderer,&r);
	        // SDL_SetRenderDrawColor(renderer, 0xFF, 0x00, 0x00, 0x00);
	        // SDL_RenderFillRect(renderer, &r);
	        // SDL_SetRenderTarget(renderer, NULL);
	        SDL_RenderCopy(renderer, texture, NULL, NULL);
	        // SDL_RenderCopy(renderer, texture, &r, &r);
	        SDL_RenderPresent(renderer);
	        // SDL_Delay(50);
        }
        av_packet_unref(&packet);
    }

    SDL_DestroyTexture(texture);
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
	sws_freeContext(sws_ctx);
	av_free(buffer);
	av_frame_free(&pict);
	av_frame_free(&frame);
	avcodec_free_context(&codec_ctx);
    end:
	avformat_close_input(&fmt_ctx);
	printf("Shutdown\n");
	return 0;
}
Example no. 27
static int seek_test(const char *input_filename, const char *start, const char *end)
{
    AVCodec *codec = NULL;
    AVCodecContext *ctx= NULL;
    AVCodecParameters *origin_par = NULL;
    AVFrame *fr = NULL;
    AVFormatContext *fmt_ctx = NULL;
    int video_stream;
    int result;
    int i = 0, j = 0;
    long int start_ts, end_ts;

    size_of_array = 0;
    number_of_elements = 0;
    crc_array = pts_array = NULL;

    result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't open file\n");
        return result;
    }

    result = avformat_find_stream_info(fmt_ctx, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't get stream info\n");
        return result;
    }

    start_ts = read_seek_range(start);
    end_ts = read_seek_range(end);
    if ((start_ts < 0) || (end_ts < 0))
        return -1;

    //TODO: add ability to work with audio format
    video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream < 0) {
      av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input file\n");
      return -1;
    }

    origin_par = fmt_ctx->streams[video_stream]->codecpar;

    codec = avcodec_find_decoder(origin_par->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n");
        return -1;
    }

    ctx = avcodec_alloc_context3(codec);
    if (!ctx) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate decoder context\n");
        return AVERROR(ENOMEM);
    }

    result = avcodec_parameters_to_context(ctx, origin_par);
    if (result) {
        av_log(NULL, AV_LOG_ERROR, "Can't copy decoder context\n");
        return result;
    }

    result = avcodec_open2(ctx, codec, NULL);
    if (result < 0) {
        av_log(ctx, AV_LOG_ERROR, "Can't open decoder\n");
        return result;
    }

    fr = av_frame_alloc();
    if (!fr) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate frame\n");
        return AVERROR(ENOMEM);
    }

    result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 1);
    if (result != 0)
        return -1;

    for (i = start_ts; i < end_ts; i += 100) {
        for (j = i + 100; j < end_ts; j += 100) {
            result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 0);
            if (result != 0)
                return -1;
        }
    }

    av_freep(&crc_array);
    av_freep(&pts_array);
    av_frame_free(&fr);
    avcodec_close(ctx);
    avformat_close_input(&fmt_ctx);
    avcodec_free_context(&ctx);
    return 0;
}
Example no. 28
STATUS DemuxerLibAV::selectVideoStream() {
	if (!_initialized) {
		log->printf("DemuxerLibAV::selectVideoStream(): demuxer not opened!\n");
		return S_FAIL;
	}

	for (U32 i = 0; i < _afc->nb_streams; i++) {
		AVStream *stream = _afc->streams[i];
		AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
		if (codec == NULL) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_find_decoder failed!\n");
			return S_FAIL;
		}
		AVCodecContext *cc = avcodec_alloc_context3(codec);
		if (cc == NULL) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_alloc_context3 failed!\n");
			return S_FAIL;
		}
		if (avcodec_parameters_to_context(cc, stream->codecpar) < 0) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_to_context failed!\n");
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		if (cc->codec_type == AVMEDIA_TYPE_VIDEO) {
			_videoStream = stream;
			if (cc->codec_id == AV_CODEC_ID_H264) {
				if (cc->extradata && cc->extradata_size >= 8 && cc->extradata[0] == 1) {
					const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
					if (bsf == nullptr) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_get_by_name failed!\n");
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				    if (av_bsf_alloc(bsf, &_bsf) < 0) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_alloc failed!\n");
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				    if (avcodec_parameters_from_context(_bsf->par_in, cc) < 0)
				    {
						log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_from_context failed!\n");
						av_bsf_free(&_bsf);
						avcodec_free_context(&cc);
						return S_FAIL;
				    }
				    _bsf->time_base_in = cc->time_base;
					if (av_bsf_init(_bsf) < 0) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_init failed!\n");
						av_bsf_free(&_bsf);
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				}
			} else if (cc->codec_id == AV_CODEC_ID_MPEG4) {
				const AVBitStreamFilter *bsf = av_bsf_get_by_name("mpeg4_unpack_bframes");
				if (bsf == nullptr) {
					log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_get_by_name failed!\n");
					avcodec_free_context(&cc);
					return S_FAIL;
				}
			    if (av_bsf_alloc(bsf, &_bsf) < 0) {
					log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_alloc failed!\n");
					avcodec_free_context(&cc);
					return S_FAIL;
				}
			    if (avcodec_parameters_from_context(_bsf->par_in, cc) < 0)
			    {
					log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_from_context failed!\n");
					av_bsf_free(&_bsf);
					avcodec_free_context(&cc);
					return S_FAIL;
			    }
			    _bsf->time_base_in = cc->time_base;
				if (av_bsf_init(_bsf) < 0) {
					log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_init failed!\n");
					av_bsf_free(&_bsf);
					avcodec_free_context(&cc);
					return S_FAIL;
				}
			} else if (cc->codec_id == AV_CODEC_ID_HEVC) {
				if (cc->extradata && cc->extradata_size >= 8 && cc->extradata[0] == 1) {
					const AVBitStreamFilter *bsf = av_bsf_get_by_name("hevc_mp4toannexb");
					if (bsf == nullptr) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_get_by_name failed!\n");
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				    if (av_bsf_alloc(bsf, &_bsf) < 0) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_alloc failed!\n");
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				    if (avcodec_parameters_from_context(_bsf->par_in, cc) < 0)
				    {
						log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_from_context failed!\n");
						av_bsf_free(&_bsf);
						avcodec_free_context(&cc);
						return S_FAIL;
				    }
				    _bsf->time_base_in = cc->time_base;
					if (av_bsf_init(_bsf) < 0) {
						log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_init failed!\n");
						av_bsf_free(&_bsf);
						avcodec_free_context(&cc);
						return S_FAIL;
					}
				}
			}

			_videoStreamInfo.width = (U32)cc->width;
			_videoStreamInfo.height = (U32)cc->height;
			_videoStreamInfo.timeBaseScale = (U32)cc->time_base.num;
			_videoStreamInfo.timeBaseRate = (U32)cc->time_base.den;
			_videoStreamInfo.priv = cc;

			switch (cc->codec_id) {
			case AV_CODEC_ID_MPEG1VIDEO:
				_videoStreamInfo.codecId = CODEC_ID_MPEG1VIDEO;
				break;
			case AV_CODEC_ID_MPEG2VIDEO:
				_videoStreamInfo.codecId = CODEC_ID_MPEG2VIDEO;
				break;
			case AV_CODEC_ID_H261:
				_videoStreamInfo.codecId = CODEC_ID_H261;
				break;
			case AV_CODEC_ID_H263:
				_videoStreamInfo.codecId = CODEC_ID_H263;
				break;
			case AV_CODEC_ID_MPEG4:
				_videoStreamInfo.codecId = CODEC_ID_MPEG4;
				break;
			case AV_CODEC_ID_MSMPEG4V1:
				_videoStreamInfo.codecId = CODEC_ID_MSMPEG4V1;
				break;
			case AV_CODEC_ID_MSMPEG4V2:
				_videoStreamInfo.codecId = CODEC_ID_MSMPEG4V2;
				break;
			case AV_CODEC_ID_MSMPEG4V3:
				_videoStreamInfo.codecId = CODEC_ID_MSMPEG4V3;
				break;
			case AV_CODEC_ID_H263P:
				_videoStreamInfo.codecId = CODEC_ID_H263P;
				break;
			case AV_CODEC_ID_H263I:
				_videoStreamInfo.codecId = CODEC_ID_H263I;
				break;
			case AV_CODEC_ID_FLV1:
				_videoStreamInfo.codecId = CODEC_ID_FLV1;
				break;
			case AV_CODEC_ID_SVQ1:
				_videoStreamInfo.codecId = CODEC_ID_SVQ1;
				break;
			case AV_CODEC_ID_SVQ3:
				_videoStreamInfo.codecId = CODEC_ID_SVQ3;
				break;
			case AV_CODEC_ID_AIC:
				_videoStreamInfo.codecId = CODEC_ID_AIC;
				break;
			case AV_CODEC_ID_DVVIDEO:
				_videoStreamInfo.codecId = CODEC_ID_DVVIDEO;
				break;
			case AV_CODEC_ID_VP3:
				_videoStreamInfo.codecId = CODEC_ID_VP3;
				break;
			case AV_CODEC_ID_VP5:
				_videoStreamInfo.codecId = CODEC_ID_VP5;
				break;
			case AV_CODEC_ID_VP6:
				_videoStreamInfo.codecId = CODEC_ID_VP6;
				break;
			case AV_CODEC_ID_VP6A:
				_videoStreamInfo.codecId = CODEC_ID_VP6A;
				break;
			case AV_CODEC_ID_VP6F:
				_videoStreamInfo.codecId = CODEC_ID_VP6F;
				break;
			case AV_CODEC_ID_VP7:
				_videoStreamInfo.codecId = CODEC_ID_VP7;
				break;
			case AV_CODEC_ID_VP8:
				_videoStreamInfo.codecId = CODEC_ID_VP8;
				break;
			case AV_CODEC_ID_VP9:
				_videoStreamInfo.codecId = CODEC_ID_VP9;
				break;
			case AV_CODEC_ID_WEBP:
				_videoStreamInfo.codecId = CODEC_ID_WEBP;
				break;
			case AV_CODEC_ID_THEORA:
				_videoStreamInfo.codecId = CODEC_ID_THEORA;
				break;
			case AV_CODEC_ID_RV10:
				_videoStreamInfo.codecId = CODEC_ID_RV10;
				break;
			case AV_CODEC_ID_RV20:
				_videoStreamInfo.codecId = CODEC_ID_RV20;
				break;
			case AV_CODEC_ID_RV30:
				_videoStreamInfo.codecId = CODEC_ID_RV30;
				break;
			case AV_CODEC_ID_RV40:
				_videoStreamInfo.codecId = CODEC_ID_RV40;
				break;
			case AV_CODEC_ID_WMV1:
				_videoStreamInfo.codecId = CODEC_ID_WMV1;
				break;
			case AV_CODEC_ID_WMV2:
				_videoStreamInfo.codecId = CODEC_ID_WMV2;
				break;
			case AV_CODEC_ID_WMV3:
				_videoStreamInfo.codecId = CODEC_ID_WMV3;
				break;
			case AV_CODEC_ID_VC1:
				_videoStreamInfo.codecId = CODEC_ID_VC1;
				break;
			case AV_CODEC_ID_H264:
				_videoStreamInfo.codecId = CODEC_ID_H264;
				break;
			case AV_CODEC_ID_HEVC:
				_videoStreamInfo.codecId = CODEC_ID_HEVC;
				break;
			default:
				_videoStreamInfo.codecId = CODEC_ID_NONE;
				log->printf("DemuxerLibAV::selectVideoStream(): Unknown codec: 0x%08x!\n",
						cc->codec_id);
				avcodec_free_context(&cc);
				return S_FAIL;
			}

			switch (cc->pix_fmt) {
			case AV_PIX_FMT_RGB24:
				_videoStreamInfo.pixelfmt = FMT_RGB24;
				break;
			case AV_PIX_FMT_ARGB:
				_videoStreamInfo.pixelfmt = FMT_ARGB;
				break;
			case AV_PIX_FMT_YUV420P:
				_videoStreamInfo.pixelfmt = FMT_YUV420P;
				break;
			case AV_PIX_FMT_YUV422P:
				_videoStreamInfo.pixelfmt = FMT_YUV422P;
				break;
			case AV_PIX_FMT_YUV444P:
				_videoStreamInfo.pixelfmt = FMT_YUV444P;
				break;
			case AV_PIX_FMT_NV12:
				_videoStreamInfo.pixelfmt = FMT_NV12;
				break;
			default:
				_videoStreamInfo.pixelfmt = FMT_NONE;
				log->printf("DemuxerLibAV::selectVideoStream(): Unknown pixel format: 0x%08x!\n", cc->pix_fmt);
				avcodec_free_context(&cc);
				return S_FAIL;
			}
			return S_OK;
		}
		// not a video stream: release the temporary codec context before trying the next stream
		avcodec_free_context(&cc);
	}

	return S_FAIL;
}
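Example 28 allocates and initializes the h264_mp4toannexb, hevc_mp4toannexb and mpeg4_unpack_bframes filters but the listing never shows packets being pushed through them. A minimal sketch of how a demuxer method might run a packet through _bsf, assuming the member names and logging used above (filterPacket itself is hypothetical; the av_bsf_* calls come from <libavcodec/bsf.h> on newer FFmpeg):

// Hypothetical helper: filter one demuxed packet through _bsf (if any) before
// handing it to the decoder. av_bsf_send_packet() moves the packet's data into
// the filter; av_bsf_receive_packet() refills the same packet with the output.
STATUS DemuxerLibAV::filterPacket(AVPacket *pkt) {
	if (_bsf == nullptr)
		return S_OK;                  // no bitstream filter needed for this codec

	if (av_bsf_send_packet(_bsf, pkt) < 0) {
		log->printf("DemuxerLibAV::filterPacket(): av_bsf_send_packet failed!\n");
		return S_FAIL;
	}
	int ret = av_bsf_receive_packet(_bsf, pkt);
	if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
		log->printf("DemuxerLibAV::filterPacket(): av_bsf_receive_packet failed!\n");
		return S_FAIL;
	}
	return S_OK;
}

For these particular filters one input packet normally yields one output packet, so a single receive is enough; draining in a loop is the fully general pattern.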
Example no. 29
int MP4Decoder::InitDecoder(const char *mp4Path) {
    // 1. Register all muxers, demuxers and codecs
    av_register_all();
    // 2. Allocate the AVFormatContext
    pFormatCtx = avformat_alloc_context();

    // 3. Open the input file
    if (avformat_open_input(&pFormatCtx, mp4Path, NULL, NULL) != 0) {
        LOGE("could not open input stream");
        return -1;
    }
    // 4. Retrieve the stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        LOGE("could not find stream information");
        return -1;
    }
    // Find the index of the video stream
    int videoIndex = -1;
    for (int i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;
            break;
        }
    if (videoIndex == -1) {
        LOGE("could not find a video stream");
        return -1;
    }
    // 5. Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pFormatCtx->streams[videoIndex]->codecpar->codec_id);
    if (pCodec == NULL) {
        LOGE("could not find Codec");
        return -1;
    }

    // 6. Configure and open the decoder
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoIndex]->codecpar);
    pCodecCtx->thread_count = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGE("could not open codec");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                              pCodecCtx->width,
                                              pCodecCtx->height, 1);
    uint8_t *out_buffer = (unsigned char *) av_malloc(bufferSize);
    av_image_fill_arrays(pFrameYUV->data,
                         pFrameYUV->linesize,
                         out_buffer,
                         AV_PIX_FMT_YUV420P,
                         pCodecCtx->width,
                         pCodecCtx->height, 1);

    // av_packet_alloc() returns a properly initialized packet (a raw av_malloc would leave it uninitialized)
    pAvPacket = av_packet_alloc();

    pSwsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                 pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                 SWS_BICUBIC, NULL, NULL, NULL);
    return 0;
}
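InitDecoder() only sets up the decoder, the frames and the scaler; the listing does not show how those members are used afterwards. A hypothetical decode step, assuming the member names from InitDecoder (pFormatCtx, pCodecCtx, pAvPacket, pFrame, pFrameYUV, pSwsContext) and a caller that consumes the YUV420P output; DecodeNextFrame and its videoIndex parameter are not part of the original class:

// Hypothetical companion to InitDecoder(): decode the next video frame and
// convert it to planar YUV420P in pFrameYUV. Returns 0 on success, -1 on EOF/error.
int MP4Decoder::DecodeNextFrame(int videoIndex) {
    while (av_read_frame(pFormatCtx, pAvPacket) >= 0) {
        if (pAvPacket->stream_index == videoIndex) {
            if (avcodec_send_packet(pCodecCtx, pAvPacket) == 0 &&
                avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
                // convert whatever pixel format the decoder produced to YUV420P
                sws_scale(pSwsContext,
                          (const uint8_t *const *) pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);
                av_packet_unref(pAvPacket);
                return 0;
            }
        }
        av_packet_unref(pAvPacket);
    }
    return -1;  // end of file or read error
}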
int main(int argc, char* argv[]) {
	printf("Read few frame and write to image\n");
	if(argc < 2) {
		printf("Missing input video file\n");
		return -1;
	}
	int ret = -1, i = 0, v_stream_idx = -1;
	char* vf_path = argv[1];
	AVFormatContext* fmt_ctx = NULL;
	AVCodecContext* codec_ctx = NULL;
	AVCodec* codec = NULL;
	AVPacket pkt;
	AVFrame* frm = NULL;

	av_register_all();
	ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL);
	if(ret < 0){
		printf("Open video file %s failed \n", vf_path);
		goto end;
	}

	// avformat_find_stream_info() fills in the codec parameters (width, height,
	// pixel format); without it sws_getContext() below would get invalid dimensions
	if(avformat_find_stream_info(fmt_ctx, NULL)<0)
		goto end;

    av_dump_format(fmt_ctx, 0, argv[1], 0);

	for(i = 0; i < fmt_ctx->nb_streams; i++) {
		if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			v_stream_idx = i;
			break;
		}
	}
	if(v_stream_idx == -1) {
		printf("Cannot find video stream\n");
		goto end;
	}else{
		printf("Video stream %d with resolution %dx%d\n", v_stream_idx,
			fmt_ctx->streams[v_stream_idx]->codecpar->width,
			fmt_ctx->streams[v_stream_idx]->codecpar->height);
	}

	codec_ctx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar);

	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if(codec == NULL){
		printf("Unsupported codec for video file\n");
		goto end;
	}
	ret = avcodec_open2(codec_ctx, codec, NULL);
	if(ret < 0){
		printf("Can not open codec\n");
		goto end;
	}

	frm = av_frame_alloc();

  	struct SwsContext      *sws_ctx = NULL;
  	AVFrame         *pFrameRGB = NULL;
	int             numBytes;
	uint8_t         *buffer = NULL;

  // Allocate an AVFrame structure
  pFrameRGB=av_frame_alloc();
  if(pFrameRGB==NULL)
    return -1;
  
  // Determine required buffer size and allocate buffer
  // (av_image_get_buffer_size, from <libavutil/imgutils.h>, replaces the deprecated avpicture_get_size)
  numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, codec_ctx->width,
                                      codec_ctx->height, 1);
  buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

  sws_ctx =
    sws_getContext
    (
        codec_ctx->width,
        codec_ctx->height,
        codec_ctx->pix_fmt,
        codec_ctx->width,
        codec_ctx->height,
        AV_PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );

	if(sws_ctx == NULL) {
		printf("Can not use sws\n");
		goto end;
	}

	// fill pFrameRGB->data / linesize so sws_scale has valid destination pointers
	av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_RGB24,
	                     codec_ctx->width, codec_ctx->height, 1);



	i=0;
	int ret1 = -1, ret2 = -1, fi = -1;
	while(av_read_frame(fmt_ctx, &pkt)>=0) {
		if(pkt.stream_index == v_stream_idx) {
			ret1 = avcodec_send_packet(codec_ctx, &pkt);
			ret2 = avcodec_receive_frame(codec_ctx, frm);
			printf("ret1 %d ret2 %d\n", ret1, ret2);
			// avcodec_decode_video2(codec_ctx, frm, &fi, &pkt);
		}
		// ret2 must be checked: until a frame has actually been received, frm holds
		// no data and sws_scale would fail with "bad src image pointers"
		if(ret2 >= 0 && ++i <= 5) {
	        sws_scale
		        (
		            sws_ctx,
		            (uint8_t const * const *)frm->data,
		            frm->linesize,
		            0,
		            codec_ctx->height,
		            pFrameRGB->data,
		            pFrameRGB->linesize
		        );

			save_frame(pFrameRGB, codec_ctx->width, codec_ctx->height, i);
			// save_frame(frm, codec_ctx->width, codec_ctx->height, i);
		}
		av_packet_unref(&pkt);
		if(i>=5){
			break;
		}
	}

	av_frame_free(&frm);
	av_frame_free(&pFrameRGB);
	av_free(buffer);
	sws_freeContext(sws_ctx);

	avcodec_free_context(&codec_ctx);
	end:
	avformat_close_input(&fmt_ctx);
	printf("Shutdown\n");
	return 0;
}
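save_frame() is called above but not included in the listing. A minimal PPM writer in the spirit of the classic decoding tutorials, assuming the frame holds packed RGB24 data of the given width and height (the helper and its name are an assumption, not part of the original code):

#include <stdio.h>
#include <libavutil/frame.h>

/* Write one RGB24 frame to frameN.ppm. */
static void save_frame(AVFrame *pFrame, int width, int height, int iFrame)
{
    char szFilename[32];
    int  y;

    snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);
    FILE *pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    /* PPM header: binary RGB, 8 bits per channel */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* One row at a time, honouring the frame's line stride */
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}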