status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	// After a seek the codec's internal state no longer matches the new
	// stream position; flush FFmpeg's buffers so we keep the sync.
	if (fCodecInitDone) {
		avcodec_flush_buffers(fContext);
		_ResetTempPacket();
	}

	// Drop our own buffered data as well. The video chunk buffer is owned
	// by this object and must be released before the pointers are cleared.
	free(fVideoChunkBuffer);
		// TODO: Replace with fChunkBuffer, once audio path is
		// responsible for freeing the chunk buffer, too.
	fVideoChunkBuffer = NULL;
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;
	fDecodedDataSizeInBytes = 0;

	// Adopt the new position reported by the reader.
	fFrame = frame;
	fStartTime = time;

	return B_OK;
}
status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	// A seek invalidates whatever the codec has buffered internally;
	// flushing keeps decoding in sync with the new stream position.
	if (fCodecInitDone) {
		avcodec_flush_buffers(fContext);
		_ResetTempPacket();
	}

	// Discard any partially consumed chunk and pending decoded output.
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;
	fDecodedDataSizeInBytes = 0;

	// Remember where the reader seeked us to.
	fFrame = frame;
	fStartTime = time;

	return B_OK;
}
/*!	\brief Negotiates the raw video output format with the caller.

	Configures the FFmpeg codec context (extradata, truncated-frame
	handling), (re)opens the codec and decodes the first video frame so
	that pixel format, frame rate and header fields are known, then fills
	\a inOutFormat with the resulting raw video format.

	NOTE(review): the statement order is significant — extradata must be
	installed before avcodec_open2(), and _DecodeNextVideoFrame() must run
	after the codec is open so fHeader/fOutputFrameRate are valid below.

	\param inOutFormat in: the caller's requested format (only the display
		color space is honored); out: the negotiated raw video format.
	\return B_OK on success, B_ERROR if the codec could not be opened or
		the first frame could not be decoded.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");
	TRACE(" requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;

	// Reset any color conversion state from a previous negotiation; it is
	// re-created lazily by _DecodeNextVideoFrame() once the codec reports
	// its pixel format.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	// Hand the container-provided codec extradata to FFmpeg; must happen
	// before avcodec_open2().
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	bool codecCanHandleIncompleteFrames
		= (fCodec->capabilities & CODEC_CAP_TRUNCATED) != 0;
	if (codecCanHandleIncompleteFrames) {
		// Expect and handle video frames to be splitted across consecutive
		// data chunks.
		fContext->flags |= CODEC_FLAG_TRUNCATED;
	}

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	if (avcodec_open2(fContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	_ResetTempPacket();

	// Decode one frame up front so the fields consumed below (fHeader,
	// fOutputFrameRate, conversion state) are populated.
	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	// Fill in the negotiated output format from the input format and the
	// values derived while decoding the first frame.
	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v] outFormat = %s\n", buffer);
	TRACE(" returned video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
/*!	\brief Negotiates the raw audio output format with the caller.

	Derives a concrete raw audio format from the encoded input format
	(resolving wildcard sample format, channel count and buffer size),
	configures and (re)opens the FFmpeg codec context accordingly, and
	resets the decoder's buffering state.

	NOTE(review): statement order is significant — the context fields
	(sample_rate, channels, extradata, ...) must be set before
	avcodec_open2(), and the frame size/count math relies on the wildcard
	fixups above it.

	\param inOutFormat in: only raw_audio.buffer_size is consulted as the
		caller's preference; out: the negotiated raw audio format.
	\return B_OK on success, B_ERROR if the codec failed to open.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	// Start from a wildcard format and pin down what the input format
	// already dictates.
	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;

	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE(" format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	// Bytes per sample, encoded in the low bits of the format constant.
	// Must be computed after the wildcard fixup above so it is non-zero
	// (presumably all concrete B_AUDIO_* formats have non-zero size bits —
	// the frame size division below depends on it).
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE(" channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	// No caller preference for the buffer size: default to 512 frames.
	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size
			= 512 * sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	// Configure the codec context; all of this must be in place before
	// avcodec_open2() below.
	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE(" bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open2(fContext, fCodec, NULL);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	// One output frame = one sample per channel.
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE(" bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	// Reset the decoder's chunk/output buffering state.
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	_ResetTempPacket();

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}