Example no. 1
/*! \brief Decode next video frame

    We decode exactly one video frame into fDecodedData. To achieve this goal,
    we might need to request several chunks of encoded data resulting in a
    variable execution time of this function.

    The length of the decoded video frame is stored in
    fDecodedDataSizeInBytes. If this variable is greater than zero, you can
    assert that there is a valid video frame available in fDecodedData.

    The decoded video frame in fDecodedData has color space conversion and
    deinterlacing already applied.

    For every decoded video frame, a media_header is populated in
    fHeader, containing the corresponding video frame properties.
    
	Normally every decoded video frame has a start_time field populated in the
	associated fHeader, which determines the presentation time of the frame.
	This relationship only holds true when each data chunk that is provided
	via GetNextChunk() contains data for exactly one encoded video frame (one
	complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to establish a meaningful relationship
	between an incomplete frame and the start time at which it should be
	presented. Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to track the start_time across multiple
	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
	and the start time at which it should be presented isn't established at
	the moment. Though this might change in the future.

    Moreover, the fOutputFrameRate variable is updated for every decoded
    video frame.

	On the first call, the member variables fSwsContext / fFormatConversionFunc
	are initialized.

	\return B_OK when we successfully decoded one video frame
 */
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	assert(fTempPacket.size >= 0);

	while (true) {
		media_header chunkMediaHeader;

		if (fTempPacket.size == 0) {
			// Our packet buffer is empty, so fill it now.
			status_t getNextChunkStatus = GetNextChunk(&fChunkBuffer,
				&fChunkBufferSize, &chunkMediaHeader);
			switch (getNextChunkStatus) {
				case B_OK:
					break;

				case B_LAST_BUFFER_ERROR:
					return _FlushOneVideoFrameFromDecoderBuffer();

				default:
					TRACE("AVCodecDecoder::_DecodeNextVideoFrame(): error from "
						"GetNextChunk(): %s\n", strerror(err));
					return getNextChunkStatus;
			}

			fTempPacket.data = static_cast<uint8_t*>(const_cast<void*>(
				fChunkBuffer));
			fTempPacket.size = fChunkBufferSize;

			fContext->reordered_opaque = chunkMediaHeader.start_time;
				// Let ffmpeg handle the relationship between start_time and
				// decoded video frame.
				//
				// Explanation:
				// The received chunk buffer may not contain the next video
				// frame to be decoded, due to frame reordering (e.g. MPEG1/2
				// provides encoded video frames in a different order than the
				// decoded video frame).
				//
				// FIXME: Research how to establish a meaningful relationship
				// between start_time and decoded video frame when the received
				// chunk buffer contains partial video frames. Maybe some data
				// formats contain time stamps (aka pts / dts fields) that can
				// be evaluated by FFMPEG. But as long as I don't have such
				// video data to test it, it makes no sense to implement it.
				//
				// FIXME: Implement tracking start_time of video frames
				// originating in data chunks that encode more than one video
				// frame at a time. In that case one would increment the
				// start_time for each consecutive frame of such a data chunk
				// (like it is done for audio frame decoding). But as long as
				// I don't have such video data to test it, it makes no sense
				// to implement it.
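				//
				// NOTE: FFmpeg hands the value assigned here back in the
				// reordered_opaque field of the resulting AVFrame, so after
				// a successful decode the start time of the picture can be
				// read from fRawDecodedPicture->reordered_opaque (presumably
				// evaluated in _HandleNewVideoFrameAndUpdateSystemState()).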

#ifdef LOG_STREAM_TO_FILE
			if (sDumpedPackets < 100) {
				sStreamLogFile.Write(fChunkBuffer, fChunkBufferSize);
				printf("wrote %ld bytes\n", fChunkBufferSize);
				sDumpedPackets++;
			} else if (sDumpedPackets == 100)
				sStreamLogFile.Unset();
#endif
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFMPEG 0.10.2 code example decoding_encoding.c, the
		// length returned by avcodec_decode_video2() is used to update the
		// packet buffer size (here it is fTempPacket.size). This way the
		// packet buffer is allowed to contain incomplete frames so we are
		// required to buffer the packets between different calls to
		// _DecodeNextVideoFrame().
		int gotPicture = 0;
		int decodedDataSizeInBytes = avcodec_decode_video2(fContext,
			fRawDecodedPicture, &gotPicture, &fTempPacket);
		if (decodedDataSizeInBytes < 0) {
			TRACE("[v] AVCodecDecoder: ignoring error in decoding frame %lld:"
				" %d\n", fFrame, len);
			// NOTE: An error from avcodec_decode_video2() is ignored by the
			// FFMPEG 0.10.2 example decoding_encoding.c. Only the packet
			// buffers are flushed accordingly.
			fTempPacket.data = NULL;
			fTempPacket.size = 0;
			continue;
		}

		fTempPacket.size -= decodedDataSizeInBytes;
		fTempPacket.data += decodedDataSizeInBytes;

		bool gotNoPictureYet = gotPicture == 0;
		if (gotNoPictureYet) {
			TRACE("frame %lld - no picture yet, decodedDataSizeInBytes: %d, "
				"chunk size: %ld\n", fFrame, decodedDataSizeInBytes,
				fChunkBufferSize);
			continue;
		}

#if DO_PROFILING
		bigtime_t formatConversionStart = system_time();
#endif

		_HandleNewVideoFrameAndUpdateSystemState();

#if DO_PROFILING
		bigtime_t doneTime = system_time();
		decodingTime += formatConversionStart - startTime;
		conversionTime += doneTime - formatConversionStart;
		profileCounter++;
		if (!(fFrame % 5)) {
			if (info) {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, info->time_to_decode);
			} else {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, bigtime_t(1000000LL / fOutputFrameRate));
			}
			decodingTime = 0;
			conversionTime = 0;
			profileCounter = 0;
		}
#endif
		return B_OK;
	}
}
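
The contract documented above can be illustrated from the caller's side. The
following is a minimal sketch, not taken from the actual sources: the Decode()
signature and the output buffer handling are assumptions, only the semantics
of fDecodedData, fDecodedDataSizeInBytes and fHeader come from the doc comment
above.

status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	// Hypothetical caller: decode one frame and hand it out.
	status_t decodingStatus = _DecodeNextVideoFrame();
	if (decodingStatus != B_OK)
		return decodingStatus;

	// Per the doc comment, fDecodedDataSizeInBytes > 0 means fDecodedData
	// holds a valid, color-converted and deinterlaced video frame.
	assert(fDecodedDataSizeInBytes > 0);
	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);

	*outFrameCount = 1;
	*mediaHeader = fHeader;
		// fHeader.start_time is only trustworthy when every chunk contained
		// exactly one complete encoded frame (see the doc comment).

	return B_OK;
}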
Example no. 2
/*! \brief Decodes next video frame.

    We decode exactly one video frame into fDecodedData. To achieve this goal,
    we might need to request several chunks of encoded data resulting in a
    variable execution time of this function.

    The length of the decoded video frame is stored in
    fDecodedDataSizeInBytes. If this variable is greater than zero, you can
    assert that there is a valid video frame available in fDecodedData.

    The decoded video frame in fDecodedData has color space conversion and
    deinterlacing already applied.

    For every decoded video frame, a media_header is populated in
    fHeader, containing the corresponding video frame properties.

	Normally every decoded video frame has a start_time field populated in the
	associated fHeader, which determines the presentation time of the frame.
	This relationship only holds true when each data chunk that is provided
	via GetNextChunk() contains data for exactly one encoded video frame (one
	complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to establish a meaningful relationship
	between an incomplete frame and the start time at which it should be
	presented. Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to track the start_time across multiple
	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
	and the start time at which it should be presented isn't established at
	the moment. Though this might change in the future.

    Moreover, the fOutputFrameRate variable is updated for every decoded
    video frame.

	On the first call, the member variables fSwsContext / fFormatConversionFunc
	are initialized.

	\returns B_OK when we successfully decoded one video frame.
	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
	\returns B_NO_MEMORY when we have no memory left for correct operation.
	\returns Other errors as reported by the underlying functions.
 */
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	assert(fTempPacket.size >= 0);

	while (true) {
		status_t loadingChunkStatus
			= _LoadNextVideoChunkIfNeededAndAssignStartTime();

		if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
			return _FlushOneVideoFrameFromDecoderBuffer();

		if (loadingChunkStatus != B_OK) {
			TRACE("AVCodecDecoder::_DecodeNextVideoFrame(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return loadingChunkStatus;
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFMPEG 0.10.2 code example decoding_encoding.c, the
		// length returned by avcodec_decode_video2() is used to update the
		// packet buffer size (here it is fTempPacket.size). This way the
		// packet buffer is allowed to contain incomplete frames so we are
		// required to buffer the packets between different calls to
		// _DecodeNextVideoFrame().
		int gotVideoFrame = 0;
		int decodedDataSizeInBytes = avcodec_decode_video2(fContext,
			fRawDecodedPicture, &gotVideoFrame, &fTempPacket);
		if (decodedDataSizeInBytes < 0) {
			TRACE("[v] AVCodecDecoder: ignoring error in decoding frame %lld:"
				" %d\n", fFrame, len);
			// NOTE: An error from avcodec_decode_video2() is ignored by the
			// FFMPEG 0.10.2 example decoding_encoding.c. Only the packet
			// buffers are flushed accordingly.
			fTempPacket.data = NULL;
			fTempPacket.size = 0;
			continue;
		}

		fTempPacket.size -= decodedDataSizeInBytes;
		fTempPacket.data += decodedDataSizeInBytes;

		bool gotNoVideoFrame = gotVideoFrame == 0;
		if (gotNoVideoFrame) {
			TRACE("frame %lld - no picture yet, decodedDataSizeInBytes: %d, "
				"chunk size: %ld\n", fFrame, decodedDataSizeInBytes,
				fChunkBufferSize);
			continue;
		}

#if DO_PROFILING
		bigtime_t formatConversionStart = system_time();
#endif

		_HandleNewVideoFrameAndUpdateSystemState();

#if DO_PROFILING
		bigtime_t doneTime = system_time();
		decodingTime += formatConversionStart - startTime;
		conversionTime += doneTime - formatConversionStart;
		profileCounter++;
		if (!(fFrame % 5)) {
			if (info) {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, info->time_to_decode);
			} else {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, bigtime_t(1000000LL / fOutputFrameRate));
			}
			decodingTime = 0;
			conversionTime = 0;
			profileCounter = 0;
		}
#endif
		return B_OK;
	}
}
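
Example no. 2 factors the chunk loading out of the decode loop. Based on the
inline code of Example no. 1, the extracted helper plausibly looks like the
sketch below; this is a reconstruction for illustration (the stream logging
and the FIXME comments are omitted), not the verified source of
_LoadNextVideoChunkIfNeededAndAssignStartTime().

status_t
AVCodecDecoder::_LoadNextVideoChunkIfNeededAndAssignStartTime()
{
	// Nothing to do while the packet buffer still contains data.
	if (fTempPacket.size > 0)
		return B_OK;

	// Our packet buffer is empty, so fill it now.
	media_header chunkMediaHeader;
	status_t getNextChunkStatus = GetNextChunk(&fChunkBuffer,
		&fChunkBufferSize, &chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	fTempPacket.data = static_cast<uint8_t*>(const_cast<void*>(fChunkBuffer));
	fTempPacket.size = fChunkBufferSize;

	// Let ffmpeg handle the relationship between start_time and decoded
	// video frame (see the corresponding comment in Example no. 1).
	fContext->reordered_opaque = chunkMediaHeader.start_time;

	return B_OK;
}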