// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void
PlatformCallback(void* decompressionOutputRefCon,
                 void* sourceFrameRefCon,
                 OSStatus status,
                 VTDecodeInfoFlags flags,
                 CVImageBufferRef image,
                 CMTime presentationTimeStamp,
                 CMTime presentationDuration)
{
  LOG("AppleVideoDecoder %s status %d flags %d", __func__, status, flags);

  AppleVTDecoder* decoder =
    static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
    static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));

  // Validate our arguments.
  if (status != noErr || !image) {
    NS_WARNING("VideoToolbox decoder returned no data");
    image = nullptr;
  } else if (flags & kVTDecodeInfo_FrameDropped) {
    NS_WARNING(" ...frame tagged as dropped...");
  } else {
    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
               "VideoToolbox returned an unexpected image type");
  }

  decoder->OutputFrame(image, *frameRef);
}

// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void
PlatformCallback(void* decompressionOutputRefCon,
                 void* sourceFrameRefCon,
                 OSStatus status,
                 VTDecodeInfoFlags flags,
                 CVImageBufferRef image,
                 CMTime presentationTimeStamp,
                 CMTime presentationDuration)
{
  LOG("AppleVideoDecoder %s status %d flags %d", __func__, status, flags);

  AppleVTDecoder* decoder =
    static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
    static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));

  // Validate our arguments.
  if (status != noErr || !image) {
    NS_WARNING("VideoToolbox decoder returned no data");
    return;
  }
  if (flags & kVTDecodeInfo_FrameDropped) {
    NS_WARNING(" ...frame dropped...");
  }
  MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
             "VideoToolbox returned an unexpected image type");

  // Forward the data back to an object method which can access
  // the correct MP4Reader callback.
  decoder->OutputFrame(image, frameRef);
}

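// A minimal sketch (not the project's actual setup code) of how a static
// callback like PlatformCallback above gets registered. VideoToolbox takes
// the function pointer plus an opaque refcon in a
// VTDecompressionOutputCallbackRecord and hands the refcon back with every
// decoded frame, which is why the callback can recover the decoder object.
// CreateDecoderSession and its parameters are illustrative names only.
#include <VideoToolbox/VideoToolbox.h>

static OSStatus
CreateDecoderSession(CMVideoFormatDescriptionRef aFormat,
                     void* aDecoder, // e.g. an AppleVTDecoder*
                     VTDecompressionSessionRef* aSessionOut)
{
  // Pair the static callback with the object pointer it should be
  // forwarded to (the decompressionOutputRefCon seen above).
  VTDecompressionOutputCallbackRecord cb = { PlatformCallback, aDecoder };
  return VTDecompressionSessionCreate(kCFAllocatorDefault,
                                      aFormat,
                                      NULL,  // decoder specification
                                      NULL,  // destination buffer attributes
                                      &cb,
                                      aSessionOut);
}
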
GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf,
    gboolean use_video_meta, GstVideoTextureCache * cache)
{
  CVImageBufferRef image_buf;
  CMBlockBufferRef block_buf;
  GstBuffer *buf;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  buf = gst_buffer_new ();

  gst_core_media_meta_add (buf, sample_buf, image_buf, block_buf);

  if (image_buf != NULL
      && CFGetTypeID (image_buf) == CVPixelBufferGetTypeID () && cache) {
    GstVideoInfo info;
    gboolean has_padding = FALSE;
    CVPixelBufferRef pixel_buf = (CVPixelBufferRef) image_buf;

    if (!gst_video_info_init_from_pixel_buffer (&info, pixel_buf)) {
      goto error;
    }

    gst_core_video_wrap_pixel_buffer (buf, &info, pixel_buf, cache,
        &has_padding);

    /* If the video meta API is not supported, remove padding by
     * copying the core media buffer to a system memory buffer */
    if (has_padding && !use_video_meta) {
      GstBuffer *copy_buf;

      copy_buf = gst_core_media_buffer_new_from_buffer (buf, &info);
      if (!copy_buf) {
        goto error;
      }

      gst_buffer_unref (buf);
      buf = copy_buf;
    }
  } else if (block_buf != NULL) {
    if (!gst_core_media_buffer_wrap_block_buffer (buf, block_buf)) {
      goto error;
    }
  } else {
    goto error;
  }

  return buf;

error:
  if (buf) {
    gst_buffer_unref (buf);
  }

  return NULL;
}

static void
gst_core_media_meta_add (GstBuffer * buffer, CMSampleBufferRef sample_buf,
    CVImageBufferRef image_buf, CMBlockBufferRef block_buf)
{
  GstCoreMediaMeta *meta;

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buffer,
      gst_core_media_meta_get_info (), NULL);
  CFRetain (sample_buf);
  if (image_buf)
    CVBufferRetain (image_buf);
  if (block_buf)
    CFRetain (block_buf);
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->block_buf = block_buf;
  if (image_buf != NULL
      && CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ())
    meta->pixel_buf = (CVPixelBufferRef) image_buf;
  else
    meta->pixel_buf = NULL;
}

// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void
PlatformCallback(void* decompressionOutputRefCon,
                 CFDictionaryRef frameInfo,
                 OSStatus status,
                 VDADecodeInfoFlags infoFlags,
                 CVImageBufferRef image)
{
  LOG("AppleVDADecoder[%s] status %d flags %d retainCount %ld",
      __func__, status, infoFlags, CFGetRetainCount(frameInfo));

  // Validate our arguments.
  // According to Apple's TN2267
  // The output callback is still called for all flushed frames,
  // but no image buffers will be returned.
  // FIXME: Distinguish between errors and empty flushed frames.
  if (status != noErr || !image) {
    NS_WARNING("AppleVDADecoder decoder returned no data");
    return;
  }
  MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
             "AppleVDADecoder returned an unexpected image type");

  if (infoFlags & kVDADecodeInfo_FrameDropped) {
    NS_WARNING(" ...frame dropped...");
    return;
  }

  AppleVDADecoder* decoder =
    static_cast<AppleVDADecoder*>(decompressionOutputRefCon);

  AutoCFRelease<CFNumberRef> ptsref =
    (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_PTS"));
  AutoCFRelease<CFNumberRef> dtsref =
    (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DTS"));
  AutoCFRelease<CFNumberRef> durref =
    (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DURATION"));
  AutoCFRelease<CFNumberRef> boref =
    (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_OFFSET"));
  AutoCFRelease<CFNumberRef> kfref =
    (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));

  Microseconds dts;
  Microseconds pts;
  Microseconds duration;
  int64_t byte_offset;
  char is_sync_point;

  CFNumberGetValue(ptsref, kCFNumberSInt64Type, &pts);
  CFNumberGetValue(dtsref, kCFNumberSInt64Type, &dts);
  CFNumberGetValue(durref, kCFNumberSInt64Type, &duration);
  CFNumberGetValue(boref, kCFNumberSInt64Type, &byte_offset);
  CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);

  nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
    new AppleVDADecoder::AppleFrameRef(dts,
                                       pts,
                                       duration,
                                       byte_offset,
                                       is_sync_point == 1));

  // Forward the data back to an object method which can access
  // the correct MP4Reader callback.
  decoder->OutputFrame(image, frameRef);
}

GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    size = stride[0] * vinfo->height;

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    GstVideoMeta *video_meta;

    video_meta = gst_buffer_add_video_meta_full (buf,
        GST_VIDEO_FRAME_FLAG_NONE, vinfo->finfo->format,
        vinfo->width, vinfo->height, n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}

void jit_gl_hap_draw_frame(void *jitob, CVImageBufferRef frame)
{
    t_jit_gl_hap * x = (t_jit_gl_hap*)jitob;
    CFTypeID imageType = CFGetTypeID(frame);
    OSType newPixelFormat;

    if(x->validframe)
        return;

    if (imageType == CVPixelBufferGetTypeID()) {
        // Update the texture
        CVBufferRetain(frame);
        if(x->buffer) {
            CVPixelBufferUnlockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);
            CVBufferRelease(x->buffer);
        }
        x->buffer = frame;
        CVPixelBufferLockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);

        x->dim[0] = CVPixelBufferGetWidth(x->buffer);
        x->dim[1] = CVPixelBufferGetHeight(x->buffer);
        newPixelFormat = CVPixelBufferGetPixelFormatType(x->buffer);

        if(x->buffer && x->hap_format==JIT_GL_HAP_PF_HAP) {
            size_t extraRight, extraBottom;
            unsigned int bitsPerPixel;
            size_t bytesPerRow;
            size_t actualBufferSize;

            CVPixelBufferGetExtendedPixels(x->buffer, NULL, &extraRight, NULL, &extraBottom);
            x->roundedWidth = x->dim[0] + extraRight;
            x->roundedHeight = x->dim[1] + extraBottom;
            if (x->roundedWidth % 4 != 0 || x->roundedHeight % 4 != 0) {
                x->validframe = 0;
                return;
            }

            switch (newPixelFormat) {
                case kHapPixelFormatTypeRGB_DXT1:
                    x->newInternalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
                    bitsPerPixel = 4;
                    break;
                case kHapPixelFormatTypeRGBA_DXT5:
                case kHapPixelFormatTypeYCoCg_DXT5:
                    x->newInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
                    bitsPerPixel = 8;
                    break;
                default:
                    // we don't support non-DXT pixel buffers
                    x->validframe = 0;
                    return;
            }
            x->useshader = (newPixelFormat == kHapPixelFormatTypeYCoCg_DXT5);

            bytesPerRow = (x->roundedWidth * bitsPerPixel) / 8;
            x->newDataLength = bytesPerRow * x->roundedHeight; // usually not the full length of the buffer
            actualBufferSize = CVPixelBufferGetDataSize(x->buffer);

            // Check the buffer is as large as we expect it to be
            if (x->newDataLength > actualBufferSize) {
                x->validframe = 0;
                return;
            }

            // If we got this far we're good to go
            x->validframe = 1;
            x->target = GL_TEXTURE_2D;
            if(!x->flipped) {
                jit_attr_setlong(x->texoutput, gensym("flip"), 1);
                x->flipped = 1;
            }
            //x->drawhap = 1;
        }
        else if(x->buffer) { // && x->hap_format==JIT_GL_HAP_PF_HAP) {
            if( newPixelFormat == k24RGBPixelFormat )
                x->newInternalFormat = GL_RGB8;
            else if( newPixelFormat == k32BGRAPixelFormat )
                x->newInternalFormat = GL_RGBA8;
            else {
                x->validframe = 0;
                return;
            }

            x->roundedWidth = x->dim[0];
            x->roundedHeight = x->dim[1];
            x->newDataLength = CVPixelBufferGetDataSize(x->buffer);
            x->rowLength = CVPixelBufferGetBytesPerRow( x->buffer ) / (x->hap_format==JIT_GL_HAP_PF_RGB ? 3 : 4);
            x->target = GL_TEXTURE_RECTANGLE_EXT;

            if(!x->flipped) {
                jit_attr_setlong(x->texoutput, gensym("flip"), 1);
                x->flipped = 1;
            }
            x->validframe = 1;
        }
    }
    else {
#ifdef MAC_VERSION
        CGSize imageSize = CVImageBufferGetEncodedSize(frame);
        bool flipped = CVOpenGLTextureIsFlipped(frame);
        x->texture = CVOpenGLTextureGetName(frame);
        x->useshader = 0;
        x->dim[0] = (t_atom_long)imageSize.width;
        x->dim[1] = (t_atom_long)imageSize.height;
        x->validframe = 1;
        x->target = GL_TEXTURE_RECTANGLE_ARB;
        if(x->flipped!=flipped) {
            jit_attr_setlong(x->texoutput, gensym("flip"), flipped);
            x->flipped = flipped;
        }
#endif
    }
}

GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf,
    gboolean use_video_meta, gboolean map)
{
  CVImageBufferRef image_buf;
  CMBlockBufferRef block_buf;
  GstCoreMediaMeta *meta;
  GstBuffer *buf;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  buf = gst_buffer_new ();

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  CFRetain (sample_buf);
  if (image_buf)
    CVBufferRetain (image_buf);
  if (block_buf)
    CFRetain (block_buf);
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->pixel_buf = NULL;
  meta->block_buf = block_buf;

  if (image_buf != NULL
      && CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    GstVideoInfo info;
    gboolean has_padding = FALSE;

    meta->pixel_buf = (CVPixelBufferRef) image_buf;
    if (!gst_video_info_init_from_pixel_buffer (&info, meta->pixel_buf)) {
      goto error;
    }

    if (!gst_core_media_buffer_wrap_pixel_buffer (buf, &info, meta->pixel_buf,
            &has_padding, map)) {
      goto error;
    }

    /* If the video meta API is not supported, remove padding by
     * copying the core media buffer to a system memory buffer */
    if (map && has_padding && !use_video_meta) {
      GstBuffer *copy_buf;

      copy_buf = gst_core_media_buffer_new_from_buffer (buf, &info);
      if (!copy_buf) {
        goto error;
      }

      gst_buffer_unref (buf);
      buf = copy_buf;
    }
  } else if (block_buf != NULL) {
    if (map && !gst_core_media_buffer_wrap_block_buffer (buf, block_buf)) {
      goto error;
    }
  } else {
    goto error;
  }

  return buf;

error:
  if (buf) {
    gst_buffer_unref (buf);
  }

  return NULL;
}

GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf)
{
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  gchar *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL
      && CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    if (CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      data = CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      size = 0;
      plane_count = CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx) *
            CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = CVPixelBufferGetBaseAddress (pixel_buf);
      size = CVPixelBufferGetBytesPerRow (pixel_buf) *
          CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    status = CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  CVBufferRetain ((CVBufferRef) sample_buf);
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}

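/* A minimal sketch of the pattern every snippet above shares: before treating
 * a generic CVImageBufferRef/CVBufferRef as a CVPixelBufferRef, compare its
 * CFTypeID against CVPixelBufferGetTypeID(). The helper name as_pixel_buffer
 * is illustrative only and does not come from any of the projects above. */
#include <CoreVideo/CoreVideo.h>

static CVPixelBufferRef
as_pixel_buffer (CVImageBufferRef image_buf)
{
  if (image_buf != NULL
      && CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    /* Safe to use the CVPixelBuffer* accessors (width, height, pixel format,
     * base address, planes) on this buffer. */
    return (CVPixelBufferRef) image_buf;
  }
  /* Some other CVImageBuffer subtype (e.g. an OpenGL texture buffer);
   * callers must take a different path, as the snippets above do. */
  return NULL;
}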