// Submit one compressed sample to the VideoToolbox decompression session.
// Must run on the decoder task queue. Decoded frames are delivered
// asynchronously via the session's output callback; the frame ref created
// by CreateAppleFrameRef() travels with the request so the callback can
// reassociate timing info. Returns NS_ERROR_FAILURE on any CoreMedia /
// VideoToolbox error, NS_OK otherwise.
nsresult
AppleVTDecoder::SubmitFrame(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  mInputIncoming--;

  // For some reason this gives me a double-free error with stagefright.
  AutoCFRelease<CMBlockBufferRef> block = nullptr;
  AutoCFRelease<CMSampleBufferRef> sample = nullptr;
  // Initialize to 0: VideoToolbox is not guaranteed to write this on
  // failure, and we test kVTDecodeInfo_FrameDropped on the error path.
  VTDecodeInfoFlags infoFlags = 0;
  OSStatus rv;

  // FIXME: This copies the sample data. I think we can provide
  // a custom block source which reuses the aSample buffer.
  // But note that there may be a problem keeping the samples
  // alive over multiple frames.
  rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
                                          const_cast<uint8_t*>(aSample->Data()),
                                          aSample->Size(),
                                          kCFAllocatorNull, // Block allocator.
                                          NULL, // Block source.
                                          0,    // Data offset.
                                          aSample->Size(),
                                          false,
                                          block.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMBlockBuffer");
    return NS_ERROR_FAILURE;
  }
  CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
  rv = CMSampleBufferCreate(kCFAllocatorDefault,
                            block,
                            true,
                            0,
                            0,
                            mFormat,
                            1,
                            1,
                            &timestamp,
                            0,
                            NULL,
                            sample.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMSampleBuffer");
    return NS_ERROR_FAILURE;
  }

  mQueuedSamples++;

  VTDecodeFrameFlags decodeFlags =
    kVTDecodeFrame_EnableAsynchronousDecompression;
  rv = VTDecompressionSessionDecodeFrame(mSession,
                                         sample,
                                         decodeFlags,
                                         CreateAppleFrameRef(aSample),
                                         &infoFlags);
  // A dropped frame is reported through infoFlags rather than treated as a
  // hard failure, so only error out when the frame was not merely dropped.
  if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
    LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
    NS_WARNING("Couldn't pass frame to decoder");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Ask for more data only when no input is already queued behind us and
  // the decoder pipeline is not holding more than its reference-frame depth.
  if (!mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
    LOG("AppleVTDecoder task queue empty; requesting more data");
    mCallback->InputExhausted();
  }
  return NS_OK;
}
// Submit one compressed MP4 sample to the VideoToolbox decompression
// session. Decoded output arrives asynchronously through the session
// callback, which receives the frame ref from CreateAppleFrameRef().
// Returns NS_ERROR_FAILURE on any CoreMedia / VideoToolbox error,
// NS_OK otherwise.
nsresult
AppleVTDecoder::SubmitFrame(mp4_demuxer::MP4Sample* aSample)
{
  // For some reason this gives me a double-free error with stagefright.
  AutoCFRelease<CMBlockBufferRef> block = nullptr;
  AutoCFRelease<CMSampleBufferRef> sample = nullptr;
  // Initialize to 0: VideoToolbox may not write this on failure.
  VTDecodeInfoFlags flags = 0;
  OSStatus rv;

  // FIXME: This copies the sample data. I think we can provide
  // a custom block source which reuses the aSample buffer.
  // But note that there may be a problem keeping the samples
  // alive over multiple frames.
  rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
                                          aSample->data,
                                          aSample->size,
                                          kCFAllocatorNull, // Block allocator.
                                          NULL, // Block source.
                                          0,    // Data offset.
                                          aSample->size,
                                          false,
                                          block.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMBlockBuffer");
    return NS_ERROR_FAILURE;
  }
  CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
  rv = CMSampleBufferCreate(kCFAllocatorDefault,
                            block,
                            true,
                            0,
                            0,
                            mFormat,
                            1,
                            1,
                            &timestamp,
                            0,
                            NULL,
                            sample.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMSampleBuffer");
    return NS_ERROR_FAILURE;
  }

  rv = VTDecompressionSessionDecodeFrame(mSession,
                                         sample,
                                         0,
                                         CreateAppleFrameRef(aSample),
                                         &flags);
  if (rv != noErr) {
    NS_WARNING("Couldn't pass frame to decoder");
    return NS_ERROR_FAILURE;
  }

  // Ask for more data.
  if (mTaskQueue->IsEmpty()) {
    LOG("AppleVTDecoder task queue empty; requesting more data");
    mCallback->InputExhausted();
  }
  return NS_OK;
}