/* Debug helper: dump the salient properties of a CVPixelBuffer at LOG level.
 *
 * The buffer's base address is locked for the duration of the dump (required
 * for CVPixelBufferGetBaseAddress() to return a valid pointer) and unlocked
 * again before returning.  On lock failure a warning is emitted and nothing
 * is dumped.
 */
void
dump_cvpixel_buffer (CVPixelBufferRef pixbuf)
{
  gsize left, right, top, bottom;

  GST_LOG ("buffer %p", pixbuf);
  if (CVPixelBufferLockBaseAddress (pixbuf, 0)) {
    /* Fixed typo in the warning text ("adress" -> "address"). */
    GST_WARNING ("Couldn't lock base address on pixel buffer !");
    return;
  }
  GST_LOG ("Width:%" G_GSIZE_FORMAT " , Height:%" G_GSIZE_FORMAT,
      CVPixelBufferGetWidth (pixbuf), CVPixelBufferGetHeight (pixbuf));
  GST_LOG ("Format:%" GST_FOURCC_FORMAT,
      GST_FOURCC_ARGS (CVPixelBufferGetPixelFormatType (pixbuf)));
  GST_LOG ("base address:%p", CVPixelBufferGetBaseAddress (pixbuf));
  GST_LOG ("Bytes per row:%" G_GSIZE_FORMAT,
      CVPixelBufferGetBytesPerRow (pixbuf));
  GST_LOG ("Data Size:%" G_GSIZE_FORMAT, CVPixelBufferGetDataSize (pixbuf));
  GST_LOG ("Plane count:%" G_GSIZE_FORMAT, CVPixelBufferGetPlaneCount (pixbuf));
  CVPixelBufferGetExtendedPixels (pixbuf, &left, &right, &top, &bottom);
  GST_LOG ("Extended pixels. left/right/top/bottom : %" G_GSIZE_FORMAT
      "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT,
      left, right, top, bottom);
  CVPixelBufferUnlockBaseAddress (pixbuf, 0);
}
// ICM decompression-tracking callback: QuickTime invokes this for each frame
// it decodes.  Copies the decoded pixel data into the decoder's callback-data
// area and signals a semaphore so the Squeak side can pick it up.
// Returns noErr even when no frame is emitted so decoding keeps running.
OSErr qQuickTimeDecoderCallback(void *decompressionTrackingRefCon, OSStatus result, ICMDecompressionTrackingFlags decompressionTrackingFlags, CVPixelBufferRef pixelBuffer, TimeValue64 displayTime, TimeValue64 displayDuration, ICMValidTimeFlags validTimeFlags, void *reserved, void *sourceFrameRefCon)
{
	OSStatus err;
	// The decompressionTrackingRefCon might actually be a QCamera or a QDecoder, but we are
	// careful to ensure that they begin with the same layout as QDecoderCallbackData.
	QDecoder* decoder = (QDecoder*)decompressionTrackingRefCon;

	// Declare up here because we need to compile on archaic GCC on Win32
	void* base;
	size_t width;   // NOTE(review): width/height are fetched below but only 'size' is used
	size_t height;
	size_t size;

//	fprintf(QSTDERR, "\n\tdecode %d ", decoder->outFrameCount);

	// No pixel buffer delivered: nothing to stash; report success so the
	// session keeps decoding.
	if (!pixelBuffer) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no pixel buffer (why?)");
		return noErr;
	}
	// Callback fired for a non-emitting event (e.g. source-data release only).
	if (!(kICMDecompressionTracking_EmittingFrame & decompressionTrackingFlags)) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no frame emitted (why?)");
		return noErr;
	}
	decoder->outFrameCount++;

	// Lock the pixel-buffer until we're done with it.
	err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
	if (err != noErr) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): can't lock CVPixelBuffer");
		// XXXX: so what do we do about it?
		return err;
	}

	// Get info about the raw pixel-buffer data.
	base = (void*)CVPixelBufferGetBaseAddress(pixelBuffer);
	width = CVPixelBufferGetWidth(pixelBuffer);
	height = CVPixelBufferGetHeight(pixelBuffer);
//	size = width*height*4;
	// Use bytes-per-row so any row padding added by CoreVideo is included.
	size = height * CVPixelBufferGetBytesPerRow(pixelBuffer);

	// Stash the data so that Squeak can retrieve it.
	qStoreCallbackData(base, &(decoder->callbackData), size);

	// We're done with the pixel-buffer
	CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

	// Signal the semaphore so that Squeak can grab the data that we just stashed.
	interpreterProxy->signalSemaphoreWithIndex(decoder->semaIndex);
	return noErr;
}
// Build a VideoFrame from the decoder's current output.  The decoded image
// lives in a CVPixelBuffer stashed by FFmpeg/VDA in frame->data[3]; an inline
// interop object keeps a reference to it and maps its planes to host memory
// on demand.
// NOTE(review): this definition is truncated in this view — the interop class
// and frame() itself are closed beyond the visible span.
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    // VDA hands the hardware surface back through AVFrame::data[3].
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // we can map the cv buffer addresses to video frame in SurfaceInteropCVBuffer. (may need VideoSurfaceInterop::mapToTexture()
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl)
            : glinterop(gl)
            , cvbuf(cv)
        {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            // Drop the pixel buffer once the owning VideoFrame goes away.
            CVPixelBufferRelease(cvbuf);
        }
        // Copy the CVPixelBuffer planes into host memory and store the result
        // into the VideoFrame pointed to by 'handle'.  Returns 'handle' on
        // success, NULL if the buffer's pixel format has no mapping.
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            // Base addresses are only valid while the buffer is locked.
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // get address results in internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            // fromGPU copies the plane data; surface height == visible height here.
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
// Dimensions (width, height) of the wrapped pixel buffer, or (0,0) when no
// buffer is currently attached.
virtual Coord2DI size()
{
    if (!m_ref)
        return Coord2DI(0, 0);
    const short w = (short)CVPixelBufferGetWidth(m_ref);
    const short h = (short)CVPixelBufferGetHeight(m_ref);
    return Coord2DI(w, h);
}
// Push a video buffer downstream after updating the aspect-fit/fill scale
// matrix in its metadata.  'data' actually carries a CVPixelBufferRef (see
// TODO below); the buffer itself is passed through unmodified.
void AspectTransform::pushBuffer(const uint8_t *const data, size_t size, videocore::IMetadata &metadata)
{
    auto output = m_output.lock();
    if(output) {
        // The byte pointer is really a CVPixelBufferRef in disguise.
        CVPixelBufferRef pb = (CVPixelBufferRef)data;
        CVPixelBufferLockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
        float width = CVPixelBufferGetWidth(pb);
        float height = CVPixelBufferGetHeight(pb);
        // Recompute the bounding box only when the source dimensions change.
        if(width != m_prevWidth || height != m_prevHeight) {
            setBoundingBoxDirty();
            m_prevHeight = height;
            m_prevWidth = width;
        }
        if(m_boundingBoxDirty) {
            // TODO: Replace CVPixelBufferRef with an internal format.
            float wfac = float(m_boundingWidth) / width;
            float hfac = float(m_boundingHeight) / height;
            // kAspectFit picks the smaller factor (letterbox), otherwise the
            // larger one (crop/fill).
            const float mult = (m_aspectMode == kAspectFit ? (wfac < hfac) : (wfac > hfac)) ? wfac : hfac;
            // Normalize back to the bounding box to get the final scale.
            wfac = width*mult / float(m_boundingWidth);
            hfac = height*mult / float(m_boundingHeight);
            m_scale = glm::vec3(wfac,hfac,1.f);
            m_boundingBoxDirty = false;
        }
        CVPixelBufferUnlockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
        // Apply the scale to the metadata's transform matrix in place.
        videocore::VideoBufferMetadata& md = dynamic_cast<videocore::VideoBufferMetadata&>(metadata);
        glm::mat4 & mat = md.getData<videocore::kVideoMetadataMatrix>();
        mat = glm::scale(mat, m_scale);
        output->pushBuffer(data, size, metadata);
    }
}
/* ICM decompression-tracking callback used for transcoding: each frame that
 * the decompression session emits is fed straight into the stream's
 * compression session.  'decompressionTrackingRefCon' carries the StreamInfo
 * that owns the compression session (si->si_v.cs).  Does nothing when the
 * incoming 'err' is already set. */
static void _frame_decompressed(void *decompressionTrackingRefCon, OSStatus err, ICMDecompressionTrackingFlags dtf, CVPixelBufferRef pixelBuffer, TimeValue64 displayTime, TimeValue64 displayDuration, ICMValidTimeFlags validTimeFlags, void *reserved, void *sourceFrameRefCon)
{
    dbg_printf("[ vOE] >> [%08lx] :: _frame_decompressed()\n", (UInt32) -1);
    if (!err) {
        StreamInfoPtr si = (StreamInfoPtr) decompressionTrackingRefCon;
        if (dtf & kICMDecompressionTracking_ReleaseSourceData) {
            // if we were responsible for managing source data buffers,
            // we should release the source buffer here,
            // using sourceFrameRefCon to identify it.
        }
        if ((dtf & kICMDecompressionTracking_EmittingFrame) && pixelBuffer) {
            ICMCompressionFrameOptionsRef frameOptions = NULL;
            OSType pf = CVPixelBufferGetPixelFormatType(pixelBuffer);
            dbg_printf("[ vOE] > [%08lx] :: _frame_decompressed() = %ld; %ld,"
                       " %lld, %lld, %ld [%ld '%4.4s' (%ld x %ld)]\n",
                       (UInt32) -1, err, dtf, displayTime, displayDuration,
                       validTimeFlags, CVPixelBufferGetDataSize(pixelBuffer),
                       (char *) &pf, CVPixelBufferGetWidth(pixelBuffer),
                       CVPixelBufferGetHeight(pixelBuffer));
            /* NOTE(review): the decoded frame's duration is overwritten with a
             * hard-coded 25 before encoding — looks intentional but is
             * unexplained; confirm against the session's time scale. */
            displayDuration = 25; //?
            // Feed the frame to the compression session.
            err = ICMCompressionSessionEncodeFrame(si->si_v.cs, pixelBuffer,
                                                   displayTime, displayDuration,
                                                   validTimeFlags, frameOptions,
                                                   NULL, NULL );
        }
    }
    dbg_printf("[ vOE] < [%08lx] :: _frame_decompressed() = %ld\n", (UInt32) -1, err);
}
/* Wrap a CVPixelBuffer in a Cinder Surface8u without copying pixel data.
 *
 * The pixel buffer's base address is locked here and must remain locked for
 * the lifetime of the returned surface, because the surface aliases the
 * buffer's memory directly.  The custom deleter unlocks the base address and
 * releases the buffer once the last reference to the surface is gone.
 *
 * Fixes:
 *  - 'sco' was read uninitialized (UB) when the buffer's pixel format was not
 *    one of the four handled types; it now defaults to UNSPECIFIED.
 *  - the deleter now unlocks the base address before releasing the buffer,
 *    balancing the lock taken above.
 */
Surface8uRef convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	// Map the CoreVideo pixel format onto Cinder's channel ordering.
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED;
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface8u *newSurface = new Surface8u( ptr, width, height, rowBytes, sco );
	return Surface8uRef( newSurface, [=] ( Surface8u *s ) {
		// Balance the lock taken at creation, then drop our buffer reference.
		::CVPixelBufferUnlockBaseAddress( pixelBufferRef, 0 );
		::CVBufferRelease( pixelBufferRef );
		delete s;
	} );
}
/* Wrap a CVPixelBuffer in a Cinder Surface8u without copying pixel data.
 *
 * The base address is locked here and the surface aliases the buffer's
 * memory; the registered deallocator (CVPixelBufferDealloc) is responsible
 * for cleaning up the buffer when the surface is destroyed.
 *
 * Fix: 'sco' was read uninitialized (UB) whenever the buffer's pixel format
 * was not one of the four handled types; it now defaults to UNSPECIFIED.
 */
Surface8u convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	// Map the CoreVideo pixel format onto Cinder's channel ordering.
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED;
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface result( ptr, width, height, rowBytes, sco );
	// The deallocator takes over buffer cleanup (unlock + release).
	result.setDeallocator( CVPixelBufferDealloc, pixelBufferRef );
	return result;
}
/* Initialise @info from the dimensions and pixel format of @pixel_buf.
 * Returns FALSE (leaving @info untouched) when the CoreVideo pixel format
 * has no GStreamer equivalent, TRUE on success. */
static gboolean
gst_video_info_init_from_pixel_buffer (GstVideoInfo * info,
    CVPixelBufferRef pixel_buf)
{
  GstVideoFormat fmt;

  fmt = gst_core_media_buffer_get_video_format
      (CVPixelBufferGetPixelFormatType (pixel_buf));
  if (fmt == GST_VIDEO_FORMAT_UNKNOWN)
    return FALSE;

  gst_video_info_init (info);
  gst_video_info_set_format (info, fmt,
      CVPixelBufferGetWidth (pixel_buf), CVPixelBufferGetHeight (pixel_buf));

  return TRUE;
}
// Copy and return a decoded frame.
// Converts a decoded CVPixelBuffer into a MediaData object and pushes it
// through the reorder queue (frames arrive in DTS order, are emitted in PTS
// order).  Three paths: a NullData placeholder while seeking past the
// target threshold, a software YCbCr copy (NV12 layout), or a zero-copy
// IOSurface-backed image.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            AppleVTDecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (!aImage) {
    // Image was dropped by decoder or none return yet.
    // We need more input to continue.
    mCallback->InputExhausted();
    return NS_OK;
  }

  // While seeking, frames that end before the seek target are replaced by
  // cheap null samples instead of being fully decoded into images.
  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    // NV12 interleaves Cb/Cr in plane 1, so both chroma planes point at the
    // same data with skip=1 and offsets 0 (Cb) / 1 (Cr).
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::CreateAndCopyData(info,
                                   mImageContainer,
                                   aFrameRef.byte_offset,
                                   aFrameRef.composition_timestamp.ToMicroseconds(),
                                   aFrameRef.duration.ToMicroseconds(),
                                   buffer,
                                   aFrameRef.is_sync_point,
                                   aFrameRef.decode_timestamp.ToMicroseconds(),
                                   visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    // Zero-copy path: hand the decoder's IOSurface straight to the
    // compositor.
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  if (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mCallback->InputExhausted();
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
// Copy and return a decoded frame.
// Converts a decoded CVPixelBuffer into a VideoData object and pushes it
// through the reorder queue (frames arrive in DTS order, are emitted in PTS
// order).  Two paths: a software YCbCr copy (NV12 layout) or a zero-copy
// IOSurface-backed image.
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             AppleVDADecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
    aFrameRef.byte_offset,
    aFrameRef.decode_timestamp.ToMicroseconds(),
    aFrameRef.composition_timestamp.ToMicroseconds(),
    aFrameRef.duration.ToMicroseconds(),
    aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (mQueuedSamples > mMaxRefFrames) {
    // We had stopped requesting more input because we had received too much at
    // the time. We can ask for more once again.
    mCallback->InputExhausted();
  }
  MOZ_ASSERT(mQueuedSamples);
  mQueuedSamples--;

  if (!aImage) {
    // Image was dropped by decoder.
    return NS_OK;
  }

  // Where our resulting image will end up.
  nsRefPtr<VideoData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error();
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    // NV12 interleaves Cb/Cr in plane 1, so both chroma planes point at the
    // same data with skip=1 and offsets 0 (Cb) / 1 (Cr).
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::Create(info,
                        mImageContainer,
                        nullptr,
                        aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds(),
                        buffer,
                        aFrameRef.is_sync_point,
                        aFrameRef.decode_timestamp.ToMicroseconds(),
                        visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
    // Zero-copy path: wrap the decoder's IOSurface in a layers image.
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    nsRefPtr<layers::Image> image =
      mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
    layers::MacIOSurfaceImage* videoImage =
      static_cast<layers::MacIOSurfaceImage*>(image.get());
    videoImage->SetSurface(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 mImageContainer,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  while (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
// Receive a decoded movie frame and prepare it for texture upload.
// Three cases: a Hap (DXT-compressed) CVPixelBuffer, an uncompressed RGB/BGRA
// CVPixelBuffer, or (on Mac) a CVOpenGLTexture that can be drawn directly.
// The previous buffer stays locked until the next frame arrives, at which
// point it is unlocked and released here.
void jit_gl_hap_draw_frame(void *jitob, CVImageBufferRef frame)
{
	t_jit_gl_hap * x = (t_jit_gl_hap*)jitob;
	CFTypeID imageType = CFGetTypeID(frame);
	OSType newPixelFormat;

	// A frame is already pending upload; skip until it has been consumed.
	if(x->validframe)
		return;

	if (imageType == CVPixelBufferGetTypeID()) {
		// Update the texture
		CVBufferRetain(frame);
		// Release (and unlock) the previously held buffer before swapping in
		// the new one; the new buffer stays locked while we read from it.
		if(x->buffer) {
			CVPixelBufferUnlockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);
			CVBufferRelease(x->buffer);
		}
		x->buffer = frame;
		CVPixelBufferLockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);

		x->dim[0] = CVPixelBufferGetWidth(x->buffer);
		x->dim[1] = CVPixelBufferGetHeight(x->buffer);
		newPixelFormat = CVPixelBufferGetPixelFormatType(x->buffer);

		if(x->buffer && x->hap_format==JIT_GL_HAP_PF_HAP) {
			size_t extraRight, extraBottom;
			unsigned int bitsPerPixel;
			size_t bytesPerRow;
			size_t actualBufferSize;

			// DXT data is padded to 4x4 block alignment; account for it.
			CVPixelBufferGetExtendedPixels(x->buffer, NULL, &extraRight, NULL, &extraBottom);
			x->roundedWidth = x->dim[0] + extraRight;
			x->roundedHeight = x->dim[1] + extraBottom;
			if (x->roundedWidth % 4 != 0 || x->roundedHeight % 4 != 0) {
				x->validframe = 0;
				return;
			}

			switch (newPixelFormat) {
				case kHapPixelFormatTypeRGB_DXT1:
					x->newInternalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
					bitsPerPixel = 4;
					break;
				case kHapPixelFormatTypeRGBA_DXT5:
				case kHapPixelFormatTypeYCoCg_DXT5:
					x->newInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
					bitsPerPixel = 8;
					break;
				default:
					// we don't support non-DXT pixel buffers
					x->validframe = 0;
					return;
					break;
			}
			// YCoCg needs a colour-conversion shader at draw time.
			x->useshader = (newPixelFormat == kHapPixelFormatTypeYCoCg_DXT5);
			bytesPerRow = (x->roundedWidth * bitsPerPixel) / 8;
			x->newDataLength = bytesPerRow * x->roundedHeight; // usually not the full length of the buffer
			actualBufferSize = CVPixelBufferGetDataSize(x->buffer);

			// Check the buffer is as large as we expect it to be
			if (x->newDataLength > actualBufferSize) {
				x->validframe = 0;
				return;
			}

			// If we got this far we're good to go
			x->validframe = 1;
			x->target = GL_TEXTURE_2D;
			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			//x->drawhap = 1;
		}
		else if(x->buffer) {// && x->hap_format==JIT_GL_HAP_PF_HAP) {
			// Uncompressed path: plain RGB/BGRA rectangle texture.
			if( newPixelFormat == k24RGBPixelFormat )
				x->newInternalFormat = GL_RGB8;
			else if( newPixelFormat == k32BGRAPixelFormat )
				x->newInternalFormat = GL_RGBA8;
			else {
				x->validframe = 0;
				return;
			}
			x->roundedWidth = x->dim[0];
			x->roundedHeight = x->dim[1];
			x->newDataLength = CVPixelBufferGetDataSize(x->buffer);
			// Row length in pixels (3 bytes/pixel for RGB, 4 for BGRA).
			x->rowLength = CVPixelBufferGetBytesPerRow( x->buffer ) / (x->hap_format==JIT_GL_HAP_PF_RGB ? 3 : 4);
			x->target = GL_TEXTURE_RECTANGLE_EXT;

			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			x->validframe = 1;
		}
	}
	else {
#ifdef MAC_VERSION
		// Frame is already an OpenGL texture; adopt it directly.
		CGSize imageSize = CVImageBufferGetEncodedSize(frame);
		bool flipped = CVOpenGLTextureIsFlipped(frame);
		x->texture = CVOpenGLTextureGetName(frame);
		x->useshader = 0;
		x->dim[0] = (t_atom_long)imageSize.width;
		x->dim[1] = (t_atom_long)imageSize.height;
		x->validframe = 1;
		x->target = GL_TEXTURE_RECTANGLE_ARB;
		if(x->flipped!=flipped) {
			jit_attr_setlong(x->texoutput, gensym("flip"), flipped);
			x->flipped = flipped;
		}
#endif
	}
}
// Copy and return a decoded frame.
// Copies the decoded NV12 CVPixelBuffer into a VideoData object, then emits
// frames from the reorder queue whose PTS has been reached (frames arrive in
// DTS order but must leave in composition order).
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            nsAutoPtr<FrameRef> aFrameRef)
{
  size_t width = CVPixelBufferGetWidth(aImage);
  size_t height = CVPixelBufferGetHeight(aImage);
  LOG("  got decoded frame data... %ux%u %s", width, height,
      CVPixelBufferIsPlanar(aImage) ? "planar" : "chunked");
#ifdef DEBUG
  // Dump per-plane geometry; we expect exactly two planes (NV12).
  size_t planes = CVPixelBufferGetPlaneCount(aImage);
  for (size_t i = 0; i < planes; ++i) {
    size_t stride = CVPixelBufferGetBytesPerRowOfPlane(aImage, i);
    LOG("     plane %u %ux%u rowbytes %u",
        (unsigned)i,
        CVPixelBufferGetWidthOfPlane(aImage, i),
        CVPixelBufferGetHeightOfPlane(aImage, i),
        (unsigned)stride);
  }
  MOZ_ASSERT(planes == 2);
#endif // DEBUG

  VideoData::YCbCrBuffer buffer;

  // Lock the returned image data.
  CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  if (rv != kCVReturnSuccess) {
    NS_ERROR("error locking pixel data");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }
  // Y plane.
  buffer.mPlanes[0].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
  buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
  buffer.mPlanes[0].mWidth = width;
  buffer.mPlanes[0].mHeight = height;
  buffer.mPlanes[0].mOffset = 0;
  buffer.mPlanes[0].mSkip = 0;
  // Cb plane.
  // NV12 interleaves Cb/Cr in plane 1, so both chroma planes point at the
  // same data with skip=1 and offsets 0 (Cb) / 1 (Cr).
  buffer.mPlanes[1].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[1].mWidth = (width+1) / 2;
  buffer.mPlanes[1].mHeight = (height+1) / 2;
  buffer.mPlanes[1].mOffset = 0;
  buffer.mPlanes[1].mSkip = 1;
  // Cr plane.
  buffer.mPlanes[2].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[2].mWidth = (width+1) / 2;
  buffer.mPlanes[2].mHeight = (height+1) / 2;
  buffer.mPlanes[2].mOffset = 1;
  buffer.mPlanes[2].mSkip = 1;

  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(width, height);
  info.mHasVideo = true;
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mConfig.display_width,
                                      mConfig.display_height);

  // Copy the image data into our own format.
  nsAutoPtr<VideoData> data;
  data =
    VideoData::Create(info,
                      mImageContainer,
                      nullptr,
                      aFrameRef->byte_offset,
                      aFrameRef->composition_timestamp,
                      aFrameRef->duration,
                      buffer,
                      aFrameRef->is_sync_point,
                      aFrameRef->decode_timestamp,
                      visible);
  // Unlock the returned image data.
  CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  mReorderQueue.Push(data.forget());
  // Assume a frame with a PTS <= current DTS is ready.
  while (mReorderQueue.Length() > 0) {
    VideoData* readyData = mReorderQueue.Pop();
    if (readyData->mTime <= aFrameRef->decode_timestamp) {
      LOG("returning queued frame with pts %lld", readyData->mTime);
      mCallback->Output(readyData);
    } else {
      LOG("requeued frame with pts %lld > %lld",
          readyData->mTime, aFrameRef->decode_timestamp);
      mReorderQueue.Push(readyData);
      break;
    }
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
// Width in pixels of the wrapped CVPixelBuffer.
size_t QTPixelBuffer::width() const
{
    const size_t pixelWidth = CVPixelBufferGetWidth(m_pixelBuffer);
    return pixelWidth;
}
/* Wrap a CoreVideo buffer in a GstBuffer without copying pixel data.
 *
 * Only CVPixelBuffer-typed buffers are handled.  The pixel buffer's base
 * address is locked (and stays locked for the GstBuffer's lifetime; the
 * attached core-video meta is responsible for cleanup) and each plane is
 * wrapped as non-shareable GstMemory.  When @vinfo is supplied, a video meta
 * with the computed strides/offsets is added as well.  Returns NULL when the
 * buffer type is unsupported or the lock fails.
 */
GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf, 0) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    /* Wrap each plane separately; offsets accumulate as if the planes were
     * contiguous. */
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    /* Packed formats are exposed as a single plane/memory. */
    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixbuf);

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    GstVideoMeta *video_meta;

    video_meta = gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        vinfo->finfo->format, CVPixelBufferGetWidth (pixbuf),
        CVPixelBufferGetHeight (pixbuf), n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
/* Encoder process callback: for each queued YUV frame, copy it into a fresh
 * CVPixelBuffer, apply any pending encoder parameter changes (fps, bitrate,
 * forced keyframe), submit it to the VideoToolbox compression session, and
 * finally move any encoded output from the internal queue to the filter
 * output.  Frames are dropped (queue flushed) while the session is not
 * configured. */
static void h264_enc_process(MSFilter *f) {
	VTH264EncCtx *ctx = (VTH264EncCtx *)f->data;
	mblk_t *frame;
	OSStatus err;
	CMTime p_time = CMTimeMake(f->ticker->time, 1000);

	if(!ctx->is_configured) {
		ms_queue_flush(f->inputs[0]);
		return;
	}

#if 0 && TARGET_OS_IPHONE
	CVPixelBufferPoolRef pixbuf_pool = VTCompressionSessionGetPixelBufferPool(ctx->session);
	if(pixbuf_pool == NULL) {
		ms_error("VideoToolbox: fails to get the pixel buffer pool");
		return;
	}
#endif

	while((frame = ms_queue_get(f->inputs[0]))) {
		YuvBuf src_yuv_frame, dst_yuv_frame = {0};
		CVPixelBufferRef pixbuf;
		CFMutableDictionaryRef enc_param = NULL;
		int i, pixbuf_fmt = kCVPixelFormatType_420YpCbCr8Planar;
		CFNumberRef value;
		CFMutableDictionaryRef pixbuf_attr;

		ms_yuv_buf_init_from_mblk(&src_yuv_frame, frame);

#if 0 && TARGET_OS_IPHONE
		CVPixelBufferPoolCreatePixelBuffer(NULL, pixbuf_pool, &pixbuf);
#else
		/* Allocate a planar I420 pixel buffer matching the configured size. */
		pixbuf_attr = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
		value = CFNumberCreate(NULL, kCFNumberIntType, &pixbuf_fmt);
		CFDictionarySetValue(pixbuf_attr, kCVPixelBufferPixelFormatTypeKey, value);
		CVPixelBufferCreate(NULL, ctx->conf.vsize.width, ctx->conf.vsize.height, kCVPixelFormatType_420YpCbCr8Planar, pixbuf_attr, &pixbuf);
		CFRelease(pixbuf_attr);
#endif

		/* Copy the source YUV planes into the locked pixel buffer. */
		CVPixelBufferLockBaseAddress(pixbuf, 0);
		dst_yuv_frame.w = (int)CVPixelBufferGetWidth(pixbuf);
		dst_yuv_frame.h = (int)CVPixelBufferGetHeight(pixbuf);
		for(i=0; i<3; i++) {
			dst_yuv_frame.planes[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
			dst_yuv_frame.strides[i] = (int)CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
		}
		ms_yuv_buf_copy(src_yuv_frame.planes, src_yuv_frame.strides, dst_yuv_frame.planes, dst_yuv_frame.strides, (MSVideoSize){dst_yuv_frame.w, dst_yuv_frame.h});
		CVPixelBufferUnlockBaseAddress(pixbuf, 0);
		freemsg(frame);

		/* Build a per-frame options dictionary for any parameter changes
		 * requested since the last frame. */
		ms_filter_lock(f);
		if(ctx->fps_changed || ctx->bitrate_changed || ctx->vfu_requested) {
			CFNumberRef value;
			enc_param = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
			if(ctx->fps_changed) {
				value = CFNumberCreate(NULL, kCFNumberFloatType, &ctx->conf.fps);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_ExpectedFrameRate, value);
				ctx->fps_changed = FALSE;
			}
			if(ctx->bitrate_changed) {
				value = CFNumberCreate(NULL, kCFNumberIntType, &ctx->conf.required_bitrate);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_AverageBitRate, value);
				ctx->bitrate_changed = FALSE;
			}
			if(ctx->vfu_requested) {
				int force_keyframe = 1;
				value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
				CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
				ctx->vfu_requested = FALSE;
			}
		}
		ms_filter_unlock(f);

		/* Without AVPF feedback, periodically force keyframes ourselves. */
		if(!ctx->enable_avpf) {
			if(ctx->first_frame) {
				ms_video_starter_first_frame(&ctx->starter, f->ticker->time);
			}
			if(ms_video_starter_need_i_frame(&ctx->starter, f->ticker->time)) {
				if(enc_param == NULL) enc_param = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
				if(CFDictionaryGetValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame) == NULL) {
					int force_keyframe = 1;
					CFNumberRef value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
					CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
				}
			}
		}

		if((err = VTCompressionSessionEncodeFrame(ctx->session, pixbuf, p_time, kCMTimeInvalid, enc_param, NULL, NULL)) != noErr) {
			ms_error("VideoToolbox: could not pass a pixbuf to the encoder: error code %d", err);
		}
		CFRelease(pixbuf);

		ctx->first_frame = FALSE;

		if(enc_param) CFRelease(enc_param);
	}

	/* Drain encoded frames produced by the output callback onto the filter
	 * output, dropping the mutex while pushing each one. */
	ms_mutex_lock(&ctx->mutex);
	while ((frame = ms_queue_get(&ctx->queue))) {
		ms_mutex_unlock(&ctx->mutex);
		ms_queue_put(f->outputs[0], frame);
		ms_mutex_lock(&ctx->mutex);
	}
	ms_mutex_unlock(&ctx->mutex);
}
// Fill @pDvdVideoPicture from the top entry of the decoder's display queue.
// In CVBufferRef mode, ownership of the pixel buffer is transferred to the
// picture (zero copy); otherwise the buffer is locked and converted to
// YUV420P in software.  Always pops the top queue entry before returning.
bool CDVDVideoCodecVDA::GetPicture(DVDVideoPicture* pDvdVideoPicture)
{
  // get the top yuv frame, we risk getting the wrong frame if the frame queue
  // depth is less than the number of encoded reference frames. If queue depth
  // is greater than the number of encoded reference frames, then the top frame
  // will never change and we can just grab a ref to the top frame.
  if (m_use_cvBufferRef)
  {
    pthread_mutex_lock(&m_queue_mutex);
    pDvdVideoPicture->dts             = m_display_queue->dts;
    pDvdVideoPicture->pts             = m_display_queue->pts;
    // Transfer the pixel-buffer reference to the output picture; the queue
    // entry gives up its ownership.
    pDvdVideoPicture->cvBufferRef     = m_display_queue->pixel_buffer_ref;
    m_display_queue->pixel_buffer_ref = NULL;
    pthread_mutex_unlock(&m_queue_mutex);

    pDvdVideoPicture->format          = RENDER_FMT_CVBREF;
    pDvdVideoPicture->iFlags          = DVP_FLAG_ALLOCATED;
    pDvdVideoPicture->color_range     = 0;
    pDvdVideoPicture->color_matrix    = 4;
    pDvdVideoPicture->iWidth          = CVPixelBufferGetWidth(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iHeight         = CVPixelBufferGetHeight(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iDisplayWidth   = pDvdVideoPicture->iWidth;
    pDvdVideoPicture->iDisplayHeight  = pDvdVideoPicture->iHeight;
  }
  else
  {
    FourCharCode pixel_buffer_format;
    CVPixelBufferRef picture_buffer_ref;

    // clone the video picture buffer settings.
    *pDvdVideoPicture = m_videobuffer;

    // get the top yuv frame, we risk getting the wrong frame if the frame queue
    // depth is less than the number of encoded reference frames. If queue depth
    // is greater than the number of encoded reference frames, then the top frame
    // will never change and we can just grab a ref to the top frame. This way
    // we don't lockout the vdadecoder while doing color format convert.
    pthread_mutex_lock(&m_queue_mutex);
    picture_buffer_ref  = m_display_queue->pixel_buffer_ref;
    pixel_buffer_format = m_display_queue->pixel_buffer_format;
    pDvdVideoPicture->dts = m_display_queue->dts;
    pDvdVideoPicture->pts = m_display_queue->pts;
    pthread_mutex_unlock(&m_queue_mutex);

    // lock the CVPixelBuffer down
    CVPixelBufferLockBaseAddress(picture_buffer_ref, 0);
    int row_stride = CVPixelBufferGetBytesPerRowOfPlane(picture_buffer_ref, 0);
    uint8_t *base_ptr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(picture_buffer_ref, 0);
    if (base_ptr)
    {
      // Software colour conversion to the YUV420P planes of the picture.
      if (pixel_buffer_format == kCVPixelFormatType_422YpCbCr8)
        UYVY422_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
      else if (pixel_buffer_format == kCVPixelFormatType_32BGRA)
        BGRA_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
    }
    // unlock the CVPixelBuffer
    CVPixelBufferUnlockBaseAddress(picture_buffer_ref, 0);
  }

  // now we can pop the top frame.
  DisplayQueuePop();

  //CLog::Log(LOGNOTICE, "%s - VDADecoderDecode dts(%f), pts(%f)", __FUNCTION__,
  //  pDvdVideoPicture->dts, pDvdVideoPicture->pts);

  return true;
}
// Convenience overload: build an image covering the buffer's full extent,
// delegating to the cropping variant with a zero origin.
CGImageRef CGImageLuminanceSource::createImageFromBuffer(CVPixelBufferRef buffer)
{
    const int fullWidth  = (int)CVPixelBufferGetWidth(buffer);
    const int fullHeight = (int)CVPixelBufferGetHeight(buffer);
    return createImageFromBuffer(buffer, 0, 0, fullWidth, fullHeight);
}