Example #1
Frame VideoStream::getNextFrame() {
	unique_ptr<AVFrame, AVFrameDeleter> frame = nullptr;
	if (state != AVERROR_EOF && !eof) {
		// Receive avFrame from decoder
		frame.reset(av_frame_alloc());
		state = avcodec_receive_frame(avCodecContext.get(), frame.get());
		if (state == AVERROR(EAGAIN)) {
			readPacket();
			state = avcodec_receive_frame(avCodecContext.get(), frame.get());
			if (this->firstFramePts == -1) {
				firstFramePts = pkt.get()->pts;
			}
		}

		assert(state != AVERROR(EINVAL)); // codec not opened, or it is an encoder
		if (state == AVERROR(EAGAIN)) {
			//cout << "output is not available right now - user must try to send new input " << endl;
		}
		else if (state == AVERROR_EOF) {
			//cout << " the decoder has been fully flushed, and there will be no more output frames" << endl;
		}
		else {
			assert(state >= 0);
		}
	}
	Frame f(std::move(frame), *pkt.get());
	f.firstFramePts = firstFramePts; // meh, parent class should be called and know all this.
	return f;
	//return Frame(std::move(frame), *pkt.get());
}
Example #2
static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *filename)
{
    char buf[1024];
    int ret;

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        printf("saving frame %3d\n", dec_ctx->frame_number);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), filename, dec_ctx->frame_number);
        pgm_save(frame->data[0], frame->linesize[0],
                 frame->width, frame->height, buf);
    }
}
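For reference, a helper shaped like decode() above is normally driven by a demux loop and then flushed once with a NULL packet so the decoder can drain its buffered frames. Below is a minimal sketch of such a driver, assuming the usual libavformat/libavcodec headers and an already opened AVFormatContext; drive_decoder and its parameter names are illustrative, not part of the example above.

static void drive_decoder(AVFormatContext *fmt_ctx, int video_stream_index,
                          AVCodecContext *dec_ctx, AVFrame *frame,
                          AVPacket *pkt, const char *filename)
{
    /* feed every packet of the selected stream to decode() above */
    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        if (pkt->stream_index == video_stream_index)
            decode(dec_ctx, frame, pkt, filename);
        av_packet_unref(pkt);
    }

    /* a NULL packet puts the decoder into draining mode; decode() then
       keeps receiving frames until avcodec_receive_frame() returns
       AVERROR_EOF */
    decode(dec_ctx, frame, NULL, filename);
}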
Example #3
static int avcodec_decoder_copy_frame(const avcodec_decoder d, opencv_mat mat, AVFrame *frame) {
    auto cvMat = static_cast<cv::Mat *>(mat);

    int res = avcodec_receive_frame(d->codec, frame);
    if (res >= 0 && frame->width == cvMat->cols && frame->height == cvMat->rows) {
        int stepSize = 4 * frame->width;
        if (frame->width % 32 != 0) {
            int width = frame->width + 32 - (frame->width % 32);
            stepSize = 4 * width;
        }
        if (!opencv_mat_set_row_stride(mat, stepSize)) {
            return -1;
        }

        struct SwsContext *sws = sws_getContext(frame->width, frame->height, (AVPixelFormat)(frame->format),
                                                frame->width, frame->height, AV_PIX_FMT_BGRA,
                                                0, NULL, NULL, NULL);
        int linesizes[] = {stepSize, 0, 0, 0};
        uint8_t *data_ptrs[] = {cvMat->data, NULL, NULL, NULL};
        sws_scale(sws, frame->data, frame->linesize, 0, frame->height, data_ptrs, linesizes);
        sws_freeContext(sws);
    }

    return res;
}
Example #4
CDVDVideoCodec::VCReturn CDVDVideoCodecDRMPRIME::GetPicture(VideoPicture* pVideoPicture)
{
  if (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN)
    Drain();

  int ret = avcodec_receive_frame(m_pCodecContext, m_pFrame);
  if (ret == AVERROR(EAGAIN))
    return VC_BUFFER;
  else if (ret == AVERROR_EOF)
    return VC_EOF;
  else if (ret)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecDRMPRIME::%s - receive frame failed, ret:%d", __FUNCTION__, ret);
    return VC_ERROR;
  }

  if (pVideoPicture->videoBuffer)
    pVideoPicture->videoBuffer->Release();
  pVideoPicture->videoBuffer = nullptr;

  SetPictureParams(pVideoPicture);

  CVideoBufferDRMPRIME* buffer = dynamic_cast<CVideoBufferDRMPRIME*>(m_videoBufferPool->Get());
  buffer->SetRef(m_pFrame);
  pVideoPicture->videoBuffer = buffer;

  return VC_PICTURE;
}
Example #5
/**
 * Decode one packet
 * @param pCodecCtx
 * @param pAvPacket
 * @param pFrame
 * @return
 */
int MP4Decoder::DecodePacket(AVCodecContext *pCodecCtx, AVPacket *pAvPacket, AVFrame *pFrame) {
    int result = avcodec_send_packet(pCodecCtx, pAvPacket);
    if (result < 0) {
        LOGE("send packet for decoding failed");
        return -1;
    }
    while (!result) {
        result = avcodec_receive_frame(pCodecCtx, pFrame);
        if (result == AVERROR(EAGAIN) || result == AVERROR_EOF) {
            return 0;
        } else if (result < 0) {
            LOGE("error during encoding %d", result);
            return -1;
        }
        sws_scale(pSwsContext,
                  (const uint8_t *const *) pFrame->data,
                  pFrame->linesize,
                  0,
                  pCodecCtx->height,
                  pFrameYUV->data,
                  pFrameYUV->linesize);

        int y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, yuv_file);    //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, yuv_file);  //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, yuv_file);  //V
        av_frame_unref(pFrame);
    }
    return 0;
}
Example #6
static int decode_packet(struct mp_decode *d, int *got_frame)
{
	int ret;
	*got_frame = 0;

#ifdef USE_NEW_FFMPEG_DECODE_API
	ret = avcodec_receive_frame(d->decoder, d->frame);
	if (ret != 0 && ret != AVERROR(EAGAIN)) {
		if (ret == AVERROR_EOF)
			ret = 0;
		return ret;
	}

	if (ret != 0) {
		ret = avcodec_send_packet(d->decoder, &d->pkt);
		if (ret != 0 && ret != AVERROR(EAGAIN)) {
			if (ret == AVERROR_EOF)
				ret = 0;
			return ret;
		}

		ret = avcodec_receive_frame(d->decoder, d->frame);
		if (ret != 0 && ret != AVERROR(EAGAIN)) {
			if (ret == AVERROR_EOF)
				ret = 0;
			return ret;
		}

		*got_frame = (ret == 0);
		ret = d->pkt.size;
	} else {
		ret = 0;
		*got_frame = 1;
	}

#else
	if (d->audio) {
		ret = avcodec_decode_audio4(d->decoder,
				d->frame, got_frame, &d->pkt);
	} else {
		ret = avcodec_decode_video2(d->decoder,
				d->frame, got_frame, &d->pkt);
	}
#endif
	return ret;
}
Example #7
bool ffmpeg_decode_audio(struct ffmpeg_decode *decode,
		uint8_t *data, size_t size,
		struct obs_source_audio *audio,
		bool *got_output)
{
	AVPacket packet = {0};
	int got_frame = false;
	int ret = 0;

	*got_output = false;

	copy_data(decode, data, size);

	av_init_packet(&packet);
	packet.data = decode->packet_buffer;
	packet.size = (int)size;

	if (!decode->frame) {
		decode->frame = av_frame_alloc();
		if (!decode->frame)
			return false;
	}

	if (data && size)
		ret = avcodec_send_packet(decode->decoder, &packet);
	if (ret == 0)
		ret = avcodec_receive_frame(decode->decoder, decode->frame);

	got_frame = (ret == 0);

	if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
		ret = 0;

	if (ret < 0)
		return false;
	else if (!got_frame)
		return true;

	for (size_t i = 0; i < MAX_AV_PLANES; i++)
		audio->data[i] = decode->frame->data[i];

	audio->samples_per_sec = decode->frame->sample_rate;
	audio->format          = convert_sample_format(decode->frame->format);
	audio->speakers        =
		convert_speaker_layout((uint8_t)decode->decoder->channels);

	audio->frames = decode->frame->nb_samples;

	if (audio->format == AUDIO_FORMAT_UNKNOWN)
		return false;

	*got_output = true;
	return true;
}
Example #8
int COMXAudioCodecOMX::Decode(BYTE* pData, int iSize, double dts, double pts)
{
  int iBytesUsed = iSize, got_frame = 0;
  if (!m_pCodecContext) return -1;

  AVPacket avpkt;
  if (m_bGotFrame)
    return 0;

  av_init_packet(&avpkt);
  avpkt.data = pData;
  avpkt.size = iSize;
  // iBytesUsed = avcodec_decode_audio4( m_pCodecContext
  //                                                , m_pFrame1
  //                                                , &got_frame
  //                                                , &avpkt);

  avcodec_send_packet(m_pCodecContext, &avpkt);
  got_frame = avcodec_receive_frame(m_pCodecContext, m_pFrame1) == 0;

  if (/*iBytesUsed < 0 ||*/ !got_frame)
  {
    return iBytesUsed;
  }
  /* some codecs will attempt to consume more data than what we gave */
  if (iBytesUsed > iSize)
  {
    LOG_WARNING << "COMXAudioCodecOMX::Decode - decoder attempted to consume more data than given";
    iBytesUsed = iSize;
  }
  m_bGotFrame = true;

  if (m_bFirstFrame)
  {
    char log_buf[512];
    sprintf(log_buf, "COMXAudioCodecOMX::Decode(%p,%d) format=%d(%d) chan=%d samples=%d size=%d data=%p,%p,%p,%p,%p,%p,%p,%p",
             pData, iSize, m_pCodecContext->sample_fmt, m_desiredSampleFormat,
             m_pCodecContext->channels, m_pFrame1->nb_samples, m_pFrame1->linesize[0],
             m_pFrame1->data[0], m_pFrame1->data[1], m_pFrame1->data[2], m_pFrame1->data[3],
             m_pFrame1->data[4], m_pFrame1->data[5], m_pFrame1->data[6], m_pFrame1->data[7]
         );
    LOG_TRACE_2 << log_buf;
  }

  if (!m_iBufferOutputUsed)
  {
    m_dts = dts;
    m_pts = pts;
  }
  return iBytesUsed;
}
Example #9
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(QByteArray &result, int64 &samplesAdded) {
	int res;

	av_frame_unref(_frame);
	res = avcodec_receive_frame(_parentData->context, _frame);
	if (res >= 0) {
		return readFromReadyFrame(result, samplesAdded);
	}

	if (res == AVERROR_EOF) {
		return ReadResult::EndOfFile;
	} else if (res != AVERROR(EAGAIN)) {
		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
		LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		return ReadResult::Error;
	}

	if (_queue.isEmpty()) {
		return _eofReached ? ReadResult::EndOfFile : ReadResult::Wait;
	}

	AVPacket packet;
	FFMpeg::packetFromDataWrap(packet, _queue.dequeue());

	_eofReached = FFMpeg::isNullPacket(packet);
	if (_eofReached) {
		avcodec_send_packet(_parentData->context, nullptr); // drain
		return ReadResult::Ok;
	}

	res = avcodec_send_packet(_parentData->context, &packet);
	if (res < 0) {
		FFMpeg::freePacket(&packet);

		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
		LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		// There is a sample voice message where skipping such packet
		// results in a crash (read_access to nullptr) in swr_convert().
		//if (res == AVERROR_INVALIDDATA) {
		//	return ReadResult::NotYet; // try to skip bad packet
		//}
		return ReadResult::Error;
	}
	FFMpeg::freePacket(&packet);
	return ReadResult::Ok;
}
Example #10
AudioPlayerLoader::ReadResult FFMpegLoader::readMore(QByteArray &result, int64 &samplesAdded) {
	int res;

	av_frame_unref(frame);
	res = avcodec_receive_frame(codecContext, frame);
	if (res >= 0) {
		return readFromReadyFrame(result, samplesAdded);
	}

	if (res == AVERROR_EOF) {
		return ReadResult::EndOfFile;
	} else if (res != AVERROR(EAGAIN)) {
		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
		LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		return ReadResult::Error;
	}

	if ((res = av_read_frame(fmtContext, &avpkt)) < 0) {
		if (res != AVERROR_EOF) {
			char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
			LOG(("Audio Error: Unable to av_read_frame() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
			return ReadResult::Error;
		}
		avcodec_send_packet(codecContext, nullptr); // drain
		return ReadResult::Ok;
	}

	if (avpkt.stream_index == streamId) {
		res = avcodec_send_packet(codecContext, &avpkt);
		if (res < 0) {
			av_packet_unref(&avpkt);

			char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
			LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
			// There is a sample voice message where skipping such packet
			// results in a crash (read_access to nullptr) in swr_convert().
			//if (res == AVERROR_INVALIDDATA) {
			//	return ReadResult::NotYet; // try to skip bad packet
			//}
			return ReadResult::Error;
		}
	}
	av_packet_unref(&avpkt);
	return ReadResult::Ok;
}
Example #11
static int determine_codec_profile(struct dec_audio *da, AVPacket *pkt)
{
    struct spdifContext *spdif_ctx = da->priv;
    int profile = FF_PROFILE_UNKNOWN;
    AVCodecContext *ctx = NULL;
    AVFrame *frame = NULL;

    AVCodec *codec = avcodec_find_decoder(spdif_ctx->codec_id);
    if (!codec)
        goto done;

    frame = av_frame_alloc();
    if (!frame)
        goto done;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        goto done;

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx); // don't attempt to avcodec_close() an unopened ctx
        ctx = NULL;
        goto done;
    }

    if (avcodec_send_packet(ctx, pkt) < 0)
        goto done;
    if (avcodec_receive_frame(ctx, frame) < 0)
        goto done;

    profile = ctx->profile;

done:
    av_frame_free(&frame);
    if (ctx)
        avcodec_close(ctx);
    avcodec_free_context(&ctx);

    if (profile == FF_PROFILE_UNKNOWN)
        MP_WARN(da, "Failed to parse codec profile.\n");

    return profile;
}
Example #12
int movie_player::get_frame(int stream, AVFrame* pFrame)
{
    int iError = AVERROR(EAGAIN);
    AVCodecContext* ctx;
    av_packet_queue* pq;

    if (stream == video_stream_index)
    {
        ctx = video_codec_context;
        pq = video_queue;
    }
    else if (stream == audio_stream_index)
    {
        ctx = audio_codec_context;
        pq = audio_queue;
    }
    else
    {
        throw std::invalid_argument("Invalid value provided for stream");
    }

    while (iError == AVERROR(EAGAIN))
    {
        iError = avcodec_receive_frame(ctx, pFrame);

        if (iError == AVERROR(EAGAIN))
        {
            AVPacket* pkt = pq->pull(true);
            int res = avcodec_send_packet(ctx, pkt);
            if (pkt != nullptr) {
                av_packet_unref(pkt);
                av_free(pkt);
            }

            if (res == AVERROR(EAGAIN))
            {
                throw std::runtime_error("avcodec_receive_frame and avcodec_send_packet should not return EAGAIN at the same time");
            }
        }
    }

    return iError;
}
Example #13
void VideoDecoder::decodePacket(AVPacket* packet) {
	TRACE_SCOPE(tracing::CutsceneFFmpegVideoDecoder);
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	int send_result;
	do {
		send_result = avcodec_send_packet(m_status->videoCodecCtx, packet);

		while(avcodec_receive_frame(m_status->videoCodecCtx, m_decodeFrame) == 0) {
			convertAndPushPicture(m_decodeFrame);
		}
	} while (send_result == AVERROR(EAGAIN));
#else
	int finishedFrame = 0;
	auto result = avcodec_decode_video2(m_status->videoCodecCtx, m_decodeFrame, &finishedFrame, packet);

	if (result >= 0 && finishedFrame) {
		convertAndPushPicture(m_decodeFrame);
	}
#endif
}
Example #14
void AudioDecoder::decodePacket(AVPacket* packet) {
	TRACE_SCOPE(tracing::CutsceneFFmpegAudioDecoder);
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	int send_result;
	do {
		send_result = avcodec_send_packet(m_status->audioCodecCtx, packet);

		while (avcodec_receive_frame(m_status->audioCodecCtx, m_decodeFrame) == 0) {
			handleDecodedFrame(m_decodeFrame);
		}
	} while (send_result == AVERROR(EAGAIN));
#else
	int finishedFrame = 0;
	auto err = avcodec_decode_audio4(m_status->audioCodecCtx, m_decodeFrame, &finishedFrame, packet);

	if (err >= 0 && finishedFrame) {
		handleDecodedFrame(m_decodeFrame);
	}
#endif
}
Example #15
void AudioDecoder::finishDecoding() {
	TRACE_SCOPE(tracing::CutsceneFFmpegAudioDecoder);
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Send flush packet
	avcodec_send_packet(m_status->audioCodecCtx, nullptr);

	// Handle those decoders that have a delay
	while (true) {
		auto ret = avcodec_receive_frame(m_status->audioCodecCtx, m_decodeFrame);

		if (ret == 0) {
			handleDecodedFrame(m_decodeFrame);
		}
		else {
			// Everything consumed or error
			break;
		}
	}
#else
	// Handle those decoders that have a delay
	AVPacket nullPacket;
	memset(&nullPacket, 0, sizeof(nullPacket));
	nullPacket.data = nullptr;
	nullPacket.size = 0;

	while (true) {
		int finishedFrame = 1;
		auto err = avcodec_decode_audio4(m_status->audioCodecCtx, m_decodeFrame, &finishedFrame, &nullPacket);

		if (err < 0 || !finishedFrame) {
			break;
		}

		handleDecodedFrame(m_decodeFrame);
	}
#endif

	// Push the last bits of audio data into the queue
	flushAudioBuffer();
}
Example #16
struct mp_image *load_image_png_buf(void *buffer, size_t buffer_size, int imgfmt)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_PNG);
    if (!codec)
        return NULL;

    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }

    AVPacket *pkt = av_packet_alloc();
    if (pkt) {
        if (av_new_packet(pkt, buffer_size) >= 0)
            memcpy(pkt->data, buffer, buffer_size);
    }

    // (There is only 1 outcome: either it takes it and decodes it, or not.)
    avcodec_send_packet(avctx, pkt);
    avcodec_send_packet(avctx, NULL);

    av_packet_free(&pkt);

    struct mp_image *res = NULL;
    AVFrame *frame = av_frame_alloc();
    if (frame && avcodec_receive_frame(avctx, frame) >= 0) {
        struct mp_image *r = mp_image_from_av_frame(frame);
        if (r)
            res = convert_image(r, imgfmt, mp_null_log);
        talloc_free(r);
    }
    av_frame_free(&frame);

    avcodec_free_context(&avctx);
    return res;
}
Example #17
int decode_video_frame (AVCodecContext * picc, const AVPacket * pkt, AVFrame * pf) {
    int ok = 0;

    // may need to feed multiple times to get one frame
    while (1) {
        ok = avcodec_send_packet(picc, pkt);
        if (ok != 0) {
            ok = AVUNERROR(ok);
            return ok;
        }

        ok = avcodec_receive_frame(picc, pf);
        ok = AVUNERROR(ok);
        if (ok != EAGAIN) {
            // either success (0) or a real error; return it in both cases
            return ok;
        }
        // need to feed raw packet again
    }

    return ok;
}
Example #18
int CFFmpegImage::DecodeFFmpegFrame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
{
  int ret;

  *got_frame = 0;

  if (pkt)
  {
    ret = avcodec_send_packet(avctx, pkt);
    // In particular, we don't expect AVERROR(EAGAIN), because we read all
    // decoded frames with avcodec_receive_frame() until done.
    if (ret < 0)
      return ret;
  }

  ret = avcodec_receive_frame(avctx, frame);
  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
    return ret;
  if (ret >= 0) // static code analysers would complain
    *got_frame = 1;

  return 0;
}
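The comment about not expecting AVERROR(EAGAIN) holds as long as the caller keeps pulling frames until *got_frame comes back 0 before feeding the next packet; passing pkt == nullptr makes the helper skip avcodec_send_packet() and only collect frames already buffered in the decoder. Below is a minimal sketch of that calling pattern, written as if inside another CFFmpegImage method; the surrounding setup and what is done with each frame are assumptions, not Kodi code.

  int got_frame = 0;
  if (DecodeFFmpegFrame(avctx, frame, &got_frame, pkt) < 0)
    return false;                      // send or receive failed
  while (got_frame)
  {
    // consume the decoded frame here, then drop our reference to it
    av_frame_unref(frame);
    // nullptr packet: only pull frames already buffered by the decoder
    if (DecodeFFmpegFrame(avctx, frame, &got_frame, nullptr) < 0)
      return false;
  }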
Example #19
void VideoDecoder::finishDecoding() {
	TRACE_SCOPE(tracing::CutsceneFFmpegVideoDecoder);

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Send flush packet
	avcodec_send_packet(m_status->videoCodecCtx, nullptr);

	// Handle those decoders that have a delay
	while (true) {
		auto ret = avcodec_receive_frame(m_status->videoCodecCtx, m_decodeFrame);

		if (ret == 0) {
			convertAndPushPicture(m_decodeFrame);
		} else {
			// Everything consumed or error
			break;
		}
	}
#else
	// Handle those decoders that have a delay
	AVPacket nullPacket;
	memset(&nullPacket, 0, sizeof(nullPacket));
	nullPacket.data = nullptr;
	nullPacket.size = 0;

	while (true) {
		int finishedFrame = 1;
		auto err = avcodec_decode_video2(m_status->videoCodecCtx, m_decodeFrame, &finishedFrame, &nullPacket);

		if (err < 0 || !finishedFrame) {
			break;
		}

		convertAndPushPicture(m_decodeFrame);
	}
#endif
}
Example #20
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == audio_stream_index) {
            ret = avcodec_send_packet(dec_ctx, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
                    goto end;
                }

                if (ret >= 0) {
                    /* push the audio data from decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                        break;
                    }

                    /* pull filtered audio from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            goto end;
                        print_frame(filt_frame);
                        av_frame_unref(filt_frame);
                    }
                    av_frame_unref(frame);
                }
            }
        }
        av_packet_unref(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
Example #21
static BOOL ffmpeg_decode(AVCodecContext* dec_ctx, AVPacket* pkt,
                          AVFrame* frame,
                          AVAudioResampleContext* resampleContext,
                          AVFrame* resampled, wStream* out)
{
	int ret;
	/* send the packet with the compressed data to the decoder */
	ret = avcodec_send_packet(dec_ctx, pkt);

	if (ret < 0)
	{
		const char* err = av_err2str(ret);
		WLog_ERR(TAG, "Error submitting the packet to the decoder %s [%d]",
		         err, ret);
		return FALSE;
	}

	/* read all the output frames (in general there may be any number of them) */
	while (ret >= 0)
	{
		ret = avcodec_receive_frame(dec_ctx, frame);

		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return TRUE;
		else if (ret < 0)
		{
			const char* err = av_err2str(ret);
			WLog_ERR(TAG, "Error during decoding %s [%d]", err, ret);
			return FALSE;
		}

		if (!avresample_is_open(resampleContext))
		{
			if ((ret = avresample_config(resampleContext, resampled, frame)) < 0)
			{
				const char* err = av_err2str(ret);
				WLog_ERR(TAG, "Error during resampling %s [%d]", err, ret);
				return FALSE;
			}

			if ((ret = (avresample_open(resampleContext))) < 0)
			{
				const char* err = av_err2str(ret);
				WLog_ERR(TAG, "Error during resampling %s [%d]", err, ret);
				return FALSE;
			}
		}

		if ((ret = avresample_convert_frame(resampleContext, resampled, frame)) < 0)
		{
			const char* err = av_err2str(ret);
			WLog_ERR(TAG, "Error during resampling %s [%d]", err, ret);
			return FALSE;
		}

		{
			const size_t data_size = resampled->channels * resampled->nb_samples * 2;
			Stream_EnsureRemainingCapacity(out, data_size);
			Stream_Write(out, resampled->data[0], data_size);
		}
	}

	return TRUE;
}
Example #22
bool ThumbFinder::getFrameImage(bool needKeyFrame, int64_t requiredPTS)
{
    AVPacket pkt;
    AVFrame orig;
    AVFrame retbuf;
    memset(&orig, 0, sizeof(AVFrame));
    memset(&retbuf, 0, sizeof(AVFrame));

    av_init_packet(&pkt);

    int frameFinished = 0;
    int keyFrame;
    int frameCount = 0;
    bool gotKeyFrame = false;

    while (av_read_frame(m_inputFC, &pkt) >= 0 && !frameFinished)
    {
        if (pkt.stream_index == m_videostream)
        {
            frameCount++;

            keyFrame = pkt.flags & AV_PKT_FLAG_KEY;

            if (m_startPTS == -1 && pkt.dts != AV_NOPTS_VALUE)
            {
                m_startPTS = pkt.dts;
                m_frameTime = pkt.duration;
            }

            if (keyFrame)
                gotKeyFrame = true;

            if (!gotKeyFrame && needKeyFrame)
            {
                av_packet_unref(&pkt);
                continue;
            }

            if (m_firstIFramePTS == -1)
                m_firstIFramePTS = pkt.dts;

            av_frame_unref(m_frame);
            frameFinished = 0;
            int ret = avcodec_receive_frame(m_codecCtx, m_frame);
            if (ret == 0)
                frameFinished = 1;
            if (ret == 0 || ret == AVERROR(EAGAIN))
                ret = avcodec_send_packet(m_codecCtx, &pkt);
            if (requiredPTS != -1 && pkt.dts != AV_NOPTS_VALUE && pkt.dts < requiredPTS)
                frameFinished = false;

            m_currentPTS = pkt.dts;
        }

        av_packet_unref(&pkt);
    }

    if (frameFinished)
    {
        av_image_fill_arrays(retbuf.data, retbuf.linesize, m_outputbuf,
            AV_PIX_FMT_RGB32, m_frameWidth, m_frameHeight, IMAGE_ALIGN);
        AVFrame *tmp = m_frame;

        m_deinterlacer->DeinterlaceSingle(tmp, tmp);

        m_copy.Copy(&retbuf, AV_PIX_FMT_RGB32, tmp, m_codecCtx->pix_fmt,
                    m_frameWidth, m_frameHeight);

        QImage img(m_outputbuf, m_frameWidth, m_frameHeight,
                   QImage::Format_RGB32);

        QByteArray ffile = m_frameFile.toLocal8Bit();
        if (!img.save(ffile.constData(), "JPEG"))
        {
            LOG(VB_GENERAL, LOG_ERR, "Failed to save thumb: " + m_frameFile);
        }

        if (m_updateFrame)
        {
            MythImage *mimage =
                GetMythMainWindow()->GetCurrentPainter()->GetFormatImage();
            mimage->Assign(img);
            m_frameImage->SetImage(mimage);
            mimage->DecrRef();
        }

        updateCurrentPos();
    }

    return true;
}
Example #23
static int decode_write(AVCodecContext *avctx, AVPacket *packet)
{
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVFrame *tmp_frame = NULL;
    uint8_t *buffer = NULL;
    int size;
    int ret = 0;

    ret = avcodec_send_packet(avctx, packet);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (ret >= 0) {
        if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
            fprintf(stderr, "Can not alloc frame\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            av_frame_free(&sw_frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding\n");
            goto fail;
        }

        if (frame->format == hw_pix_fmt) {
            /* retrieve data from GPU to CPU */
            if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
                fprintf(stderr, "Error transferring the data to system memory\n");
                goto fail;
            }
            tmp_frame = sw_frame;
        } else
            tmp_frame = frame;

        size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
                                        tmp_frame->height, 1);
        buffer = av_malloc(size);
        if (!buffer) {
            fprintf(stderr, "Can not alloc buffer\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t * const *)tmp_frame->data,
                                      (const int *)tmp_frame->linesize, tmp_frame->format,
                                      tmp_frame->width, tmp_frame->height, 1);
        if (ret < 0) {
            fprintf(stderr, "Can not copy image to buffer\n");
            goto fail;
        }

        if ((ret = fwrite(buffer, 1, size, output_file)) < 0) {
            fprintf(stderr, "Failed to dump raw data.\n");
            goto fail;
        }

    fail:
        av_frame_free(&frame);
        av_frame_free(&sw_frame);
        if (buffer)
            av_freep(&buffer);
        if (ret < 0)
            return ret;
    }

    return 0;
}
Example #24
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *****************************************************************************/
static block_t *DecodeAudio( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *ctx = p_sys->p_context;
    AVFrame *frame = NULL;

    if( !pp_block || !*pp_block )
        return NULL;

    block_t *p_block = *pp_block;

    if( !ctx->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, ctx );
        OpenAudioCodec( p_dec );
    }

    if( p_sys->b_delayed_open )
        goto end;

    if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
    {
        Flush( p_dec );
        goto end;
    }

    if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
    {
        date_Set( &p_sys->end_date, VLC_TS_INVALID );
    }

    /* We've just started the stream, wait for the first PTS. */
    if( !date_Get( &p_sys->end_date ) && p_block->i_pts <= VLC_TS_INVALID )
        goto end;

    if( p_block->i_buffer <= 0 )
        goto end;

    if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
    {
        p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        *pp_block = p_block;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );

        p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
    }

    frame = av_frame_alloc();
    if (unlikely(frame == NULL))
        goto end;

    for( int got_frame = 0; !got_frame; )
    {
        if( p_block->i_buffer == 0 )
            goto end;

        AVPacket pkt;
        av_init_packet( &pkt );
        pkt.data = p_block->p_buffer;
        pkt.size = p_block->i_buffer;

        int ret = avcodec_send_packet( ctx, &pkt );
        if( ret != 0 && ret != AVERROR(EAGAIN) )
        {
            msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                      p_block->i_buffer );
            goto end;
        }
        int used = ret != AVERROR(EAGAIN) ? pkt.size : 0;

        ret = avcodec_receive_frame( ctx, frame );
        if( ret != 0 && ret != AVERROR(EAGAIN) )
        {
            msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                      p_block->i_buffer );
            goto end;
        }
        got_frame = ret == 0;

        p_block->p_buffer += used;
        p_block->i_buffer -= used;
    }

    if( ctx->channels <= 0 || ctx->channels > 8 || ctx->sample_rate <= 0 )
    {
        msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d",
                  ctx->channels, ctx->sample_rate );
        goto end;
    }

    if( p_dec->fmt_out.audio.i_rate != (unsigned int)ctx->sample_rate )
        date_Init( &p_sys->end_date, ctx->sample_rate, 1 );

    if( p_block->i_pts > date_Get( &p_sys->end_date ) )
    {
        date_Set( &p_sys->end_date, p_block->i_pts );
    }

    if( p_block->i_buffer == 0 )
    {   /* Done with this buffer */
        block_Release( p_block );
        p_block = NULL;
        *pp_block = NULL;
    }

    /* NOTE WELL: Beyond this point, p_block refers to the DECODED block! */
    SetupOutputFormat( p_dec, true );
    if( decoder_UpdateAudioFormat( p_dec ) )
        goto drop;

    /* Interleave audio if required */
    if( av_sample_fmt_is_planar( ctx->sample_fmt ) )
    {
        p_block = block_Alloc(frame->linesize[0] * ctx->channels);
        if (unlikely(p_block == NULL))
            goto drop;

        const void *planes[ctx->channels];
        for (int i = 0; i < ctx->channels; i++)
            planes[i] = frame->extended_data[i];

        aout_Interleave(p_block->p_buffer, planes, frame->nb_samples,
                        ctx->channels, p_dec->fmt_out.audio.i_format);
        p_block->i_nb_samples = frame->nb_samples;
        av_frame_free(&frame);
    }
    else
    {
        p_block = vlc_av_frame_Wrap(frame);
        if (unlikely(p_block == NULL))
            goto drop;
        frame = NULL;
    }

    if (p_sys->b_extract)
    {   /* TODO: do not drop channels... at least not here */
        block_t *p_buffer = block_Alloc( p_dec->fmt_out.audio.i_bytes_per_frame
                                         * p_block->i_nb_samples );
        if( unlikely(p_buffer == NULL) )
            goto drop;
        aout_ChannelExtract( p_buffer->p_buffer,
                             p_dec->fmt_out.audio.i_channels,
                             p_block->p_buffer, ctx->channels,
                             p_block->i_nb_samples, p_sys->pi_extraction,
                             p_dec->fmt_out.audio.i_bitspersample );
        p_buffer->i_nb_samples = p_block->i_nb_samples;
        block_Release( p_block );
        p_block = p_buffer;
    }

    /* Silent unwanted samples */
    if( p_sys->i_reject_count > 0 )
    {
        memset( p_block->p_buffer, 0, p_block->i_buffer );
        p_sys->i_reject_count--;
    }

    p_block->i_buffer = p_block->i_nb_samples
                        * p_dec->fmt_out.audio.i_bytes_per_frame;
    p_block->i_pts = date_Get( &p_sys->end_date );
    p_block->i_length = date_Increment( &p_sys->end_date,
                                      p_block->i_nb_samples ) - p_block->i_pts;
    return p_block;

end:
    *pp_block = NULL;
drop:
    av_frame_free(&frame);
    if( p_block != NULL )
        block_Release(p_block);
    return NULL;
}
Example #25
CDVDVideoCodec::VCReturn CDVDVideoCodecFFmpeg::GetPicture(VideoPicture* pVideoPicture)
{
  if (m_eof)
  {
    return VC_EOF;
  }

  // handle hw accelerators first, they may have frames ready
  if (m_pHardware)
  {
    int flags = m_codecControlFlags;
    flags &= ~DVD_CODEC_CTRL_DRAIN;
    m_pHardware->SetCodecControl(flags);
    CDVDVideoCodec::VCReturn ret = m_pHardware->Decode(m_pCodecContext, nullptr);
    if (ret == VC_PICTURE)
    {
      if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
        return VC_PICTURE;
      else
        return VC_ERROR;
    }
    else if (ret == VC_BUFFER)
      ;
    else
      return ret;
  }
  else if (m_pFilterGraph && !m_filterEof)
  {
    CDVDVideoCodec::VCReturn ret = FilterProcess(nullptr);
    if (ret == VC_PICTURE)
    {
      if (!SetPictureParams(pVideoPicture))
        return VC_ERROR;
      return VC_PICTURE;
    }
    else if (ret == VC_BUFFER)
      ;
    else
      return ret;
  }

  // process ffmpeg
  if (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN)
  {
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = nullptr;
    avpkt.size = 0;
    avpkt.dts = AV_NOPTS_VALUE;
    avpkt.pts = AV_NOPTS_VALUE;
    avcodec_send_packet(m_pCodecContext, &avpkt);
  }

  int ret = avcodec_receive_frame(m_pCodecContext, m_pDecodedFrame);

  if (m_decoderState == STATE_HW_FAILED && !m_pHardware)
    return VC_REOPEN;

  if(m_iLastKeyframe < m_pCodecContext->has_b_frames + 2)
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;

  if (ret == AVERROR_EOF)
  {
    // next drain hw accel or filter
    if (m_pHardware)
    {
      int flags = m_codecControlFlags;
      flags |= DVD_CODEC_CTRL_DRAIN;
      m_pHardware->SetCodecControl(flags);
      int ret = m_pHardware->Decode(m_pCodecContext, nullptr);
      if (ret == VC_PICTURE)
      {
        if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
          return VC_PICTURE;
        else
          return VC_ERROR;
      }
      else
      {
        m_eof = true;
        CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof hw accel");
        return VC_EOF;
      }
    }
    else if (m_pFilterGraph && !m_filterEof)
    {
      int ret = FilterProcess(nullptr);
      if (ret == VC_PICTURE)
      {
        if (!SetPictureParams(pVideoPicture))
          return VC_ERROR;
        else
          return VC_PICTURE;
      }
      else
      {
        m_eof = true;
        CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof filter");
        return VC_EOF;
      }
    }
    else
    {
      m_eof = true;
      CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof");
      return VC_EOF;
    }
  }
  else if (ret == AVERROR(EAGAIN))
  {
    return VC_BUFFER;
  }
  else if (ret)
  {
    CLog::Log(LOGERROR, "%s - avcodec_receive_frame returned failure", __FUNCTION__);
    return VC_ERROR;
  }

  // here we got a frame
  int64_t framePTS = m_pDecodedFrame->best_effort_timestamp;

  if (m_pCodecContext->skip_frame > AVDISCARD_DEFAULT)
  {
    if (m_dropCtrl.m_state == CDropControl::VALID &&
        m_dropCtrl.m_lastPTS != AV_NOPTS_VALUE &&
        framePTS != AV_NOPTS_VALUE &&
        framePTS > (m_dropCtrl.m_lastPTS + m_dropCtrl.m_diffPTS * 1.5))
    {
      m_droppedFrames++;
      if (m_interlaced)
        m_droppedFrames++;
    }
  }
  m_dropCtrl.Process(framePTS, m_pCodecContext->skip_frame > AVDISCARD_DEFAULT);

  if (m_pDecodedFrame->key_frame)
  {
    m_started = true;
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;
  }
  if (m_pDecodedFrame->interlaced_frame)
    m_interlaced = true;
  else
    m_interlaced = false;

  if (!m_started)
  {
    int frames = 300;
    if (m_dropCtrl.m_state == CDropControl::VALID)
      frames = static_cast<int>(6000000 / m_dropCtrl.m_diffPTS);
    if (m_iLastKeyframe >= frames && m_pDecodedFrame->pict_type == AV_PICTURE_TYPE_I)
    {
      m_started = true;
    }
    else
    {
      av_frame_unref(m_pDecodedFrame);
      return VC_BUFFER;
    }
  }

  // push the frame to hw decoder for further processing
  if (m_pHardware)
  {
    av_frame_unref(m_pFrame);
    av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    CDVDVideoCodec::VCReturn ret = m_pHardware->Decode(m_pCodecContext, m_pFrame);
    if (ret == VC_FLUSHED)
    {
      Reset();
      return ret;
    }
    else if (ret == VC_FATAL)
    {
      m_decoderState = STATE_HW_FAILED;
      return VC_REOPEN;
    }
    else if (ret == VC_PICTURE)
    {
      if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
        return VC_PICTURE;
      else
        return VC_ERROR;
    }

    return ret;
  }
  // process filters for sw decoding
  else
  {
    SetFilters();

    bool need_scale = std::find(m_formats.begin(),
                                m_formats.end(),
                                m_pCodecContext->pix_fmt) == m_formats.end();

    bool need_reopen = false;
    if (m_filters != m_filters_next)
      need_reopen = true;

    if (!m_filters_next.empty() && m_filterEof)
      need_reopen = true;

    if (m_pFilterIn)
    {
      if (m_pFilterIn->outputs[0]->format != m_pCodecContext->pix_fmt ||
          m_pFilterIn->outputs[0]->w != m_pCodecContext->width ||
          m_pFilterIn->outputs[0]->h != m_pCodecContext->height)
        need_reopen = true;
    }

    // try to setup new filters
    if (need_reopen || (need_scale && m_pFilterGraph == nullptr))
    {
      m_filters = m_filters_next;

      if (FilterOpen(m_filters, need_scale) < 0)
        FilterClose();
    }

    if (m_pFilterGraph && !m_filterEof)
    {
      CDVDVideoCodec::VCReturn ret = FilterProcess(m_pDecodedFrame);
      if (ret != VC_PICTURE)
        return VC_NONE;
    }
    else
    {
      av_frame_unref(m_pFrame);
      av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    }

    if (!SetPictureParams(pVideoPicture))
      return VC_ERROR;
    else
      return VC_PICTURE;
  }

  return VC_NONE;
}
Example #26
bool FFmpegAudioReader::readFrame(AVFrame* decode_frame) {
	// FFmpeg 3.1 and onwards has a new, better packet handling API but that version is relatively new so we still need
	// to support the earlier API. The compatibility code can be removed once the new version is supported by most platforms

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Process pending frames
	auto pending_res = avcodec_receive_frame(_codec_ctx, decode_frame);

	if (pending_res == 0) {
		// Got a frame
		return true;
	}
	if (pending_res == AVERROR_EOF) {
		// No more frames available
		return false;
	}

	if (pending_res != AVERROR(EAGAIN)) {
		// Unknown error.
		return false;
	}
#else
	if (_currentPacket != nullptr) {
		// Got some data left
		int finishedFrame = 0;
		auto bytes_read = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, _currentPacket);

		if (bytes_read < 0) {
			// Error!
			return false;
		}

		_currentPacket->data += bytes_read;
		_currentPacket->size -= bytes_read;

		if (_currentPacket->size <= 0) {
			// Done with this packet
			av_packet_free(&_currentPacket);
		}

		if (finishedFrame) {
			return true;
		}
	}
#endif

	AVPacket packet;
	while (av_read_frame(_format_ctx, &packet) >= 0) {
		AVPacketScope packet_scope(&packet);
		if (packet.stream_index == _stream_idx) {
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
			auto res = avcodec_send_packet(_codec_ctx, &packet);

			if (res != 0) {
				// Error or EOF
				return false;
			}

			res = avcodec_receive_frame(_codec_ctx, decode_frame);

			if (res == 0) {
				// Got a frame
				return true;
			}
			if (res == AVERROR_EOF) {
				// No more frames available
				return false;
			}

			if (res != AVERROR(EAGAIN)) {
				// Unknown error.
				return false;
			}
			// EAGAIN was returned, send new input
#else
			int finishedFrame = 0;
			auto bytes_read = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, &packet);

			if (bytes_read < packet.size) {
				// Not all data was read
				packet.data += bytes_read;
				packet.size -= bytes_read;

				_currentPacket = av_packet_clone(&packet);
			}

			if (finishedFrame) {
				return true;
			}

			if (bytes_read < 0) {
				// Error
				return false;
			}
#endif
		}
	}

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Flush decoder
	if (avcodec_send_packet(_codec_ctx, nullptr) != 0) {
		return false;
	}

	auto flush_res = avcodec_receive_frame(_codec_ctx, decode_frame);

	if (flush_res == 0) {
		// Got a frame
		return true;
	}
#else
	AVPacket nullPacket;
	memset(&nullPacket, 0, sizeof(nullPacket));
	nullPacket.data = nullptr;
	nullPacket.size = 0;

	int finishedFrame = 1;
	auto err = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, &nullPacket);

	if (finishedFrame && err >= 0) {
		return true;
	}
#endif

	// If we are here then read_frame reached the end or returned an error
	return false;
}
Example #27
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVFrame *frame = NULL;
	struct vidframe vf;
	struct vidsz sz;
	unsigned i;

	if (st->codec) {
		int got_pict, ret;

#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		frame = av_frame_alloc();
#else
		frame = avcodec_alloc_frame();
#endif

#if LIBAVCODEC_VERSION_INT >= ((57<<16)+(37<<8)+100)

		ret = avcodec_send_packet(st->ctx, pkt);
		if (ret < 0)
			goto out;

		ret = avcodec_receive_frame(st->ctx, frame);
		if (ret < 0)
			goto out;

		got_pict = true;

#elif LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, frame,
					    &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			goto out;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			info("avformat: size changed: %d x %d  ---> %d x %d\n",
			     st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;
		}
	}
	else {
		/* No-codec option is not supported */
		return;
	}

#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(5<<8)+0)
	switch (frame->format) {

	case AV_PIX_FMT_YUV420P:
	case AV_PIX_FMT_YUVJ420P:
		vf.fmt = VID_FMT_YUV420P;
		break;

	default:
		warning("avformat: decode: bad pixel format"
			" (%i) (%s)\n",
			frame->format,
			av_get_pix_fmt_name(frame->format));
		goto out;
	}
#else
	vf.fmt = VID_FMT_YUV420P;
#endif

	vf.size = sz;
	for (i=0; i<4; i++) {
		vf.data[i]     = frame->data[i];
		vf.linesize[i] = frame->linesize[i];
	}

	st->frameh(&vf, st->arg);

 out:
	if (frame) {
#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		av_frame_free(&frame);
#else
		av_free(frame);
#endif
	}
}
Example #28
/**
 * Decode one audio frame from the input file.
 * @param      frame                Audio frame to be decoded
 * @param      input_format_context Format context of the input file
 * @param      input_codec_context  Codec context of the input file
 * @param[out] data_present         Indicates whether data has been decoded
 * @param[out] finished             Indicates whether the end of file has
 *                                  been reached and all data has been
 *                                  decoded. If this flag is false, there
 *                                  is more data to be decoded, i.e., this
 *                                  function has to be called again.
 * @return Error code (0 if successful)
 */
int Transcode::decode_audio_frame(AVFrame *frame,
                              AVFormatContext *input_format_context,
                              AVCodecContext *input_codec_context,
                              int *data_present, int *finished)
{
    /* Packet used for temporary storage. */
    AVPacket input_packet;
    int error;
    init_packet(&input_packet);
    
    /* Read one audio frame from the input file into a temporary packet. */
    if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
        /* If we are at the end of the file, flush the decoder below. */
        if (error == AVERROR_EOF)
            *finished = 1;
        else {
            fprintf(stderr, "Could not read frame (error '%s')\n",
                    av_cplus_err2str(error));
            return error;
        }
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)    
    /* Decode the audio frame stored in the temporary packet.
     * The input audio stream decoder is used to do this.
     * If we are at the end of the file, pass an empty packet to the decoder
     * to flush it. */
    if ((error = avcodec_decode_audio4(input_codec_context, frame,
                                       data_present, &input_packet)) < 0) {
        fprintf(stderr, "Could not decode frame (error '%s')\n",
                av_cplus_err2str(error));
        av_packet_unref(&input_packet);
        return error;
    }
#else    
    int ret;
    // AVERROR(EAGAIN) means the decoder needs more input
    // before it can return another decoded frame
    do {
        do {
            ret = avcodec_send_packet(input_codec_context, &input_packet);
        } while(ret == AVERROR(EAGAIN));

        if(ret == AVERROR_EOF || ret == AVERROR(EINVAL)) {
            printf("AVERROR(EAGAIN): %d, AVERROR_EOF: %d, AVERROR(EINVAL): %d\n", AVERROR(EAGAIN), AVERROR_EOF, AVERROR(EINVAL));
            printf("fe_read_frame: Frame getting error (%d)!\n", ret);
            return ret;
        } else {
            *data_present = 1;
        }
        ret = avcodec_receive_frame(input_codec_context, frame);
    } while(ret == AVERROR(EAGAIN));
    
    if(ret == AVERROR_EOF){
        *finished = 1;
        *data_present = 0;
    }

    if(ret == AVERROR(EINVAL)) {
        // An error or EOF occurred; break out and return what
        // we have so far.
//        printf("AVERROR(EAGAIN): %d, AVERROR_EOF: %d, AVERROR(EINVAL): %d\n", AVERROR(EAGAIN), AVERROR_EOF, AVERROR(EINVAL));
//        printf("fe_read_frame: EOF or some othere decoding error (%d)!\n", ret);
        fprintf(stderr, "Could not decode frame (error '%s')\n",
        av_cplus_err2str(ret));
        av_packet_unref(&input_packet);
        return ret;
    }
#endif
    
    

    /* If the decoder has not been flushed completely, we are not finished,
     * so that this function has to be called again. */
    if (*finished && *data_present)
        *finished = 0;
    av_packet_unref(&input_packet);
    return 0;
}
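The doc comment above defines a two-flag protocol: *data_present reports whether this call produced samples, *finished reports whether the stream has been read and drained completely. A minimal read loop built on that contract is sketched below; the Transcode instance and the context names are placeholders, not taken from the original project, and the usual FFmpeg headers are assumed.

static int drain_input_audio(Transcode &transcoder,
                             AVFormatContext *in_fmt_ctx,
                             AVCodecContext *in_codec_ctx)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    int finished = 0;
    while (!finished) {
        int data_present = 0;
        int err = transcoder.decode_audio_frame(frame, in_fmt_ctx,
                                                in_codec_ctx,
                                                &data_present, &finished);
        if (err < 0) {                   /* hard read/decode error */
            av_frame_free(&frame);
            return err;
        }
        if (data_present) {
            /* hand the decoded samples to a resampler or encoder here */
        }
        /* finished == 1 with data_present == 0 means fully drained */
    }

    av_frame_free(&frame);
    return 0;
}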
Example #29
static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
                         int *got_frame, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    av_assert0(avci->compat_decode_consumed == 0);

    *got_frame = 0;
    avci->compat_decode = 1;

    if (avci->compat_decode_partial_size > 0 &&
        avci->compat_decode_partial_size != pkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "Got unexpected packet size after a partial decode\n");
        ret = AVERROR(EINVAL);
        goto finish;
    }

    if (!avci->compat_decode_partial_size) {
        ret = avcodec_send_packet(avctx, pkt);
        if (ret == AVERROR_EOF)
            ret = 0;
        else if (ret == AVERROR(EAGAIN)) {
            /* we fully drain all the output in each decode call, so this should not
             * ever happen */
            ret = AVERROR_BUG;
            goto finish;
        } else if (ret < 0)
            goto finish;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (frame != avci->compat_decode_frame) {
            if (!avctx->refcounted_frames) {
                ret = unrefcount_frame(avci, frame);
                if (ret < 0)
                    goto finish;
            }

            *got_frame = 1;
            frame = avci->compat_decode_frame;
        } else {
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
                       "API cannot return all the frames for this decoder. "
                       "Some frames will be dropped. Update your code to the "
                       "new decoding API to fix this.\n");
                avci->compat_decode_warned = 1;
            }
        }

        if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
            break;
    }

finish:
    if (ret == 0) {
        /* if there are any bsfs then assume full packet is always consumed */
        if (avctx->codec->bsfs)
            ret = pkt->size;
        else
            ret = FFMIN(avci->compat_decode_consumed, pkt->size);
    }
    avci->compat_decode_consumed = 0;
    avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;

    return ret;
}
Example #30
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *****************************************************************************/
static block_t *DecodeAudio( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *ctx = p_sys->p_context;
    AVFrame *frame = NULL;
    block_t *p_block = NULL;

    if( !ctx->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, ctx );
        OpenAudioCodec( p_dec );
    }

    if( p_sys->b_delayed_open )
    {
        if( pp_block )
            p_block = *pp_block;
        goto drop;
    }

    /* Flushing or decoding, we return any block ready from multiple frames output */
    if( p_sys->p_decoded )
        return DequeueOneDecodedFrame( p_sys );

    if( pp_block == NULL ) /* Drain request */
    {
        /* we don't need to care about return val */
        (void) avcodec_send_packet( ctx, NULL );
    }
    else
    {
        p_block = *pp_block;
    }

    if( p_block )
    {
        if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
        {
            Flush( p_dec );
            goto drop;
        }

        if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
        {
            date_Set( &p_sys->end_date, VLC_TS_INVALID );
        }

        /* We've just started the stream, wait for the first PTS. */
        if( !date_Get( &p_sys->end_date ) && p_block->i_pts <= VLC_TS_INVALID )
            goto drop;

        if( p_block->i_buffer <= 0 )
            goto drop;

        if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
        {
            p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
            if( !p_block )
                return NULL;
            *pp_block = p_block;
            p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
            memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );

            p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
        }
    }

    frame = av_frame_alloc();
    if (unlikely(frame == NULL))
        goto end;

    for( int ret = 0; ret == 0; )
    {
        /* Feed in the loop as buffer could have been full on first iterations */
        if( p_block )
        {
            AVPacket pkt;
            av_init_packet( &pkt );
            pkt.data = p_block->p_buffer;
            pkt.size = p_block->i_buffer;
            ret = avcodec_send_packet( ctx, &pkt );
            if( ret == 0 ) /* Block has been consumed */
            {
                /* Only set new pts from input block if it has been used,
                 * otherwise let it be through interpolation */
                if( p_block->i_pts > date_Get( &p_sys->end_date ) )
                {
                    date_Set( &p_sys->end_date, p_block->i_pts );
                }

                block_Release( p_block );
                *pp_block = p_block = NULL;
            }
            else if ( ret != AVERROR(EAGAIN) ) /* Errors other than buffer full */
            {
                if( ret == AVERROR(ENOMEM) || ret == AVERROR(EINVAL) )
                    goto end;
                else
                    goto drop;
            }
        }

        /* Try to read one or multiple frames */
        ret = avcodec_receive_frame( ctx, frame );
        if( ret == 0 )
        {
            /* checks and init from first decoded frame */
            if( ctx->channels <= 0 || ctx->channels > 8 || ctx->sample_rate <= 0 )
            {
                msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d",
                          ctx->channels, ctx->sample_rate );
                goto drop;
            }
            else if( p_dec->fmt_out.audio.i_rate != (unsigned int)ctx->sample_rate )
            {
                date_Init( &p_sys->end_date, ctx->sample_rate, 1 );
            }

            SetupOutputFormat( p_dec, true );
            if( decoder_UpdateAudioFormat( p_dec ) )
                goto drop;

            block_t *p_converted = ConvertAVFrame( p_dec, frame ); /* Consumes frame */
            if( p_converted )
            {
                /* Silent unwanted samples */
                if( p_sys->i_reject_count > 0 )
                {
                    memset( p_converted->p_buffer, 0, p_converted->i_buffer );
                    p_sys->i_reject_count--;
                }
                p_converted->i_buffer = p_converted->i_nb_samples
                                      * p_dec->fmt_out.audio.i_bytes_per_frame;
                p_converted->i_pts = date_Get( &p_sys->end_date );
                p_converted->i_length = date_Increment( &p_sys->end_date,
                                                      p_converted->i_nb_samples ) - p_converted->i_pts;

                block_ChainLastAppend( &p_sys->pp_decoded_last, p_converted );
            }

            /* Prepare new frame */
            frame = av_frame_alloc();
            if (unlikely(frame == NULL))
                break;
        }
        else av_frame_free( &frame );
    };

    return ( p_sys->p_decoded ) ? DequeueOneDecodedFrame( p_sys ) : NULL;

end:
    p_dec->b_error = true;
    if( pp_block )
    {
        assert( *pp_block == p_block );
        *pp_block = NULL;
    }
drop:
    if( p_block != NULL )
        block_Release(p_block);
    if( frame != NULL )
        av_frame_free( &frame );
    return NULL;
}