//creates raw yv12 from sourceframe static ComponentResult convertColorSpace(VP8EncoderGlobals glob, ICMCompressorSourceFrameRef sourceFrame) { CVPixelBufferRef sourcePixelBuffer = NULL; sourcePixelBuffer = ICMCompressorSourceFrameGetPixelBuffer(sourceFrame); CVPixelBufferLockBaseAddress(sourcePixelBuffer, 0); //copy our frame to the raw image. TODO: I'm not checking for any padding here. unsigned char *srcBytes = CVPixelBufferGetBaseAddress(sourcePixelBuffer); dbg_printf("[vp8e - %08lx] CVPixelBufferGetBaseAddress %x\n", (UInt32)glob, sourcePixelBuffer); dbg_printf("[vp8e - %08lx] CopyChunkyYUV422ToPlanarYV12 %dx%d, %x, %d, %x, %d, %x, %d, %x, %d \n", (UInt32)glob, glob->width, glob->height, CVPixelBufferGetBaseAddress(sourcePixelBuffer), CVPixelBufferGetBytesPerRow(sourcePixelBuffer), glob->raw->planes[PLANE_Y], glob->raw->stride[PLANE_Y], glob->raw->planes[PLANE_U], glob->raw->stride[PLANE_U], glob->raw->planes[PLANE_V], glob->raw->stride[PLANE_V]); ComponentResult err = CopyChunkyYUV422ToPlanarYV12(glob->width, glob->height, CVPixelBufferGetBaseAddress(sourcePixelBuffer), CVPixelBufferGetBytesPerRow(sourcePixelBuffer), glob->raw->planes[PLANE_Y], glob->raw->stride[PLANE_Y], glob->raw->planes[PLANE_U], glob->raw->stride[PLANE_U], glob->raw->planes[PLANE_V], glob->raw->stride[PLANE_V]); CVPixelBufferUnlockBaseAddress(sourcePixelBuffer, 0); dbg_printf("[vp8e - %08lx] CVPixelBufferUnlockBaseAddress %x\n", sourcePixelBuffer); return err; }
/* Debug helper: dumps the geometry, pixel format, base address, row stride,
 * data size, plane count and extended-pixel padding of a CVPixelBuffer to
 * the GStreamer log. Read-only; the buffer is locked only for the duration
 * of the dump. */
void dump_cvpixel_buffer (CVPixelBufferRef pixbuf)
{
  gsize left, right, top, bottom;

  GST_LOG ("buffer %p", pixbuf);
  /* A non-zero return from the lock means the base address cannot be read. */
  if (CVPixelBufferLockBaseAddress (pixbuf, 0)) {
    GST_WARNING ("Couldn't lock base adress on pixel buffer !");
    return;
  }
  GST_LOG ("Width:%" G_GSIZE_FORMAT " , Height:%" G_GSIZE_FORMAT,
      CVPixelBufferGetWidth (pixbuf), CVPixelBufferGetHeight (pixbuf));
  GST_LOG ("Format:%" GST_FOURCC_FORMAT,
      GST_FOURCC_ARGS (CVPixelBufferGetPixelFormatType (pixbuf)));
  GST_LOG ("base address:%p", CVPixelBufferGetBaseAddress (pixbuf));
  GST_LOG ("Bytes per row:%" G_GSIZE_FORMAT, CVPixelBufferGetBytesPerRow (pixbuf));
  GST_LOG ("Data Size:%" G_GSIZE_FORMAT, CVPixelBufferGetDataSize (pixbuf));
  GST_LOG ("Plane count:%" G_GSIZE_FORMAT, CVPixelBufferGetPlaneCount (pixbuf));
  /* Padding pixels added by the decoder around the image proper. */
  CVPixelBufferGetExtendedPixels (pixbuf, &left, &right, &top, &bottom);
  GST_LOG ("Extended pixels. left/right/top/bottom : %" G_GSIZE_FORMAT
      "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT,
      left, right, top, bottom);
  /* Balance the lock taken above. */
  CVPixelBufferUnlockBaseAddress (pixbuf, 0);
}
/* ICM decompression-tracking callback, invoked by QuickTime for each decoded
 * frame. Copies the decoded pixel data out of the CVPixelBuffer into the
 * decoder's callback-data stash and signals the Squeak semaphore so the VM
 * can retrieve it.
 * Returns noErr when a frame is skipped (no pixel buffer, or no frame
 * emitted), or the lock error if the buffer could not be locked. */
OSErr qQuickTimeDecoderCallback(void *decompressionTrackingRefCon, OSStatus result, ICMDecompressionTrackingFlags decompressionTrackingFlags, CVPixelBufferRef pixelBuffer, TimeValue64 displayTime, TimeValue64 displayDuration, ICMValidTimeFlags validTimeFlags, void *reserved, void *sourceFrameRefCon)
{
    OSStatus err;
    // The decompressionTrackingRefCon might actually be a QCamera or a QDecoder, but we are
    // careful to ensure that they begin with the same layout as QDecoderCallbackData.
    QDecoder* decoder = (QDecoder*)decompressionTrackingRefCon;

    // Declare up here because we need to compile on archaic GCC on Win32
    void* base;
    size_t width;
    size_t height;
    size_t size;

    // fprintf(QSTDERR, "\n\tdecode %d ", decoder->outFrameCount);

    if (!pixelBuffer) {
        fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no pixel buffer (why?)");
        return noErr;
    }
    if (!(kICMDecompressionTracking_EmittingFrame & decompressionTrackingFlags)) {
        fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no frame emitted (why?)");
        return noErr;
    }
    decoder->outFrameCount++;

    // Lock the pixel-buffer until we're done with it.
    err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): can't lock CVPixelBuffer");
        // XXXX: so what do we do about it?
        return err;
    }

    // Get info about the raw pixel-buffer data.
    base = (void*)CVPixelBufferGetBaseAddress(pixelBuffer);
    width = CVPixelBufferGetWidth(pixelBuffer);
    height = CVPixelBufferGetHeight(pixelBuffer);
    // size = width*height*4;
    // Size from bytes-per-row so any row padding is included in the copy.
    size = height * CVPixelBufferGetBytesPerRow(pixelBuffer);

    // Stash the data so that Squeak can retrieve it.
    qStoreCallbackData(base, &(decoder->callbackData), size);

    // We're done with the pixel-buffer
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // Signal the semaphore so that Squeak can grab the data that we just stashed.
    interpreterProxy->signalSemaphoreWithIndex(decoder->semaIndex);
    return noErr;
}
/* Decode one packet with the software H.264 decoder, then map the VDA
 * CVPixelBuffer carried in pic->data[3] into the frame's data/linesize
 * pointers so callers can read the pixels directly.
 * NOTE(review): the buffer is retained and locked here and stashed in the
 * frame's opaque VDABufferContext; presumably it is unlocked and released
 * when the frame buffer is freed — confirm in the buffer free callback. */
static int vdadec_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    VDADecoderContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    int ret;

    /* Swap in the hardware context around the software decoder call. */
    set_context(avctx);
    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
    restore_context(avctx);
    if (*got_frame) {
        AVBufferRef *buffer = pic->buf[0];
        VDABufferContext *context = av_buffer_get_opaque(buffer);
        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];

        CVPixelBufferRetain(cv_buffer);
        CVPixelBufferLockBaseAddress(cv_buffer, 0);
        context->cv_buffer = cv_buffer;
        pic->format = ctx->pix_fmt;
        if (CVPixelBufferIsPlanar(cv_buffer)) {
            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
            av_assert0(count < 4);
            for (i = 0; i < count; i++) {
                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
            }
        } else {
            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
        }
    }
    avctx->pix_fmt = ctx->pix_fmt;

    return ret;
}
/* Copy a hardware-decoded VDA frame (CVPixelBuffer in frame->data[3]) into
 * a normal software AVFrame, then move that back into 'frame'.
 * Returns 0 on success or a negative AVERROR. */
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {
        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    /* Bug fix: the buffer was locked above but never unlocked on any path,
     * leaking the lock for every retrieved frame. Release it as soon as the
     * pixel data has been copied out. */
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
/* Encode one raw frame. 'bytes'/'byteSize' is the caller's pixel data; it is
 * copied into a pixel buffer from the compression session's pool and handed
 * to ICMCompressionSessionEncodeFrame.
 * Returns 0 on success, -5 on any QuickTime/CoreVideo failure. */
int qEncodeAPI(QEncoder* encoder, char* bytes, int byteSize)
{
    OSErr err;
    CVPixelBufferPoolRef pixelBufferPool;
    CVPixelBufferRef pixelBuffer;
    unsigned char* baseAddress;
    size_t bufferSize;
    size_t copySize;

    /* Robustness: a negative size would become a huge size_t below. */
    if (byteSize < 0) {
        fprintf(QSTDERR, "\nqEncodeQT(): negative input data size (%d)", byteSize);
        return -5;
    }

    // Grab a pixel buffer from the pool (ICMCompressionSessionEncodeFrame() needs the input
    // data to be passed in as a CVPixelBufferRef).
    pixelBufferPool = ICMCompressionSessionGetPixelBufferPool(encoder->session);
    err = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBuffer);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not obtain a pixel buffer from pool");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        return -5;
    }

    // Lock the pixel-buffer so that we can copy our data into it for encoding
    // XXXX: would be nice to avoid this copy.
    err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not lock the pixel buffer");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }
    baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
    // bufferSize = CVPixelBufferGetWidth(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer) * 4;
    bufferSize = CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer);

    // XXXX: for now, just for debugging. For production, we should notice if this happens and deal with it "appropriately".
    if ((size_t)byteSize != bufferSize) {
        /* Format fix: bufferSize is size_t, not int; print it portably. */
        fprintf(QSTDERR, "\nqEncodeQT(): input data size (%d) does not match pixel-buffer data size (%lu)",
                byteSize, (unsigned long)bufferSize);
    }

    // Bug fix: the original always copied bufferSize bytes, reading past the
    // end of 'bytes' whenever byteSize < bufferSize. Copy only the smaller
    // of the two sizes.
    copySize = ((size_t)byteSize < bufferSize) ? (size_t)byteSize : bufferSize;
    memcpy(baseAddress, bytes, copySize);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // Encode the frame (now in pixel-buffer form).
    err = ICMCompressionSessionEncodeFrame(encoder->session,
                                           pixelBuffer,
                                           0, 0, 0, // we're not specifying a frame time
                                           NULL,
                                           NULL,
                                           NULL);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not encode the frame");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }

    CVPixelBufferRelease(pixelBuffer);
    return 0;
}
/* VDA decoder output callback: wraps the decoded CVImageBuffer into the
 * MediaFrame that was attached to the frame-info dictionary and reports the
 * completed frame to the decoder. Only 4:2:2 YpCbCr ('2vuy') buffers are
 * accepted; anything else is dropped with a warning. */
static void
VDADecoderCallback (void *decompressionOutputRefCon,
                    CFDictionaryRef frameInfo,
                    OSStatus status,
                    uint32_t infoFlags,
                    CVImageBufferRef imageBuffer)
{
	MoonVDADecoder *decoder = (MoonVDADecoder *) decompressionOutputRefCon;
	VideoStream *vs = (VideoStream *) decoder->GetStream ();

	// FIXME: Is this always 1 thread? Can we optimize this
	decoder->GetDeployment ()->RegisterThread ();

	Deployment::SetCurrent (decoder->GetDeployment ());

	// Decoder may emit NULL (e.g. dropped frame); nothing to report then.
	if (imageBuffer == NULL) {
		return;
	}

	OSType format_type = CVPixelBufferGetPixelFormatType (imageBuffer);
	if (format_type != kCVPixelFormatType_422YpCbCr8) {
		g_warning ("Mismatched format in VDA");
		return;
	}

	// The MediaFrame was stashed in frameInfo when the frame was submitted.
	MediaFrame *mf = (MediaFrame *) CFDictionaryGetValue (frameInfo, CFSTR ("MoonMediaFrame"));

	mf->AddState (MediaFrameVUY2);
	// Drop any previously-owned buffer; the frame now aliases the CVPixelBuffer.
	mf->FreeBuffer ();
	mf->SetBufLen (0);

	mf->srcSlideY = 0;
	mf->srcSlideH = vs->GetHeight ();

	mf->width = vs->GetWidth ();
	mf->height = vs->GetHeight ();

	// NOTE(review): the buffer stays locked while the frame is alive;
	// presumably it is unlocked/released where decoder_specific_data is
	// cleaned up — confirm in MediaFrame teardown.
	CVPixelBufferLockBaseAddress (imageBuffer, 0);

	mf->data_stride [0] = (uint8_t *) CVPixelBufferGetBaseAddress (imageBuffer);
	mf->srcStride [0] = CVPixelBufferGetBytesPerRow (imageBuffer);

	mf->AddState (MediaFrameDecoded);

	mf->decoder_specific_data = imageBuffer;

	CVPixelBufferRetain (imageBuffer);

	decoder->ReportDecodeFrameCompleted (mf);

	mf->unref ();
}
// Wraps a CVPixelBuffer in a shared Surface8u without copying the pixels.
// The surface aliases the buffer's base address; the custom deleter keeps
// the buffer alive (and locked) for the surface's lifetime and releases it
// when the last reference goes away.
Surface8uRef convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	// Bug fix: 'sco' was read uninitialized when the pixel format was not one
	// of the four recognized types (undefined behavior). Default it.
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED;
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface8u *newSurface = new Surface8u( ptr, width, height, rowBytes, sco );
	return Surface8uRef( newSurface, [=] ( Surface8u *s ) {
		// Bug fix: balance the lock taken above before releasing the buffer.
		::CVPixelBufferUnlockBaseAddress( pixelBufferRef, 0 );
		::CVBufferRelease( pixelBufferRef );
		delete s;
	} );
}
// Wraps a CVPixelBuffer in a Surface8u without copying the pixels. The
// surface aliases the buffer's base address; CVPixelBufferDealloc is
// installed as the deallocator so the buffer is released with the surface.
// NOTE(review): the lock taken here is presumably balanced inside
// CVPixelBufferDealloc — confirm.
Surface8u convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	// Bug fix: 'sco' was read uninitialized when the pixel format was not one
	// of the four recognized types (undefined behavior). Default it.
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED;
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface result( ptr, width, height, rowBytes, sco );
	result.setDeallocator( CVPixelBufferDealloc, pixelBufferRef );
	return result;
}
/* Record one emulator frame into the QuickTime compression session.
 * Converts the palettized screenshot draw buffer to 24-bit RGB directly
 * into the (module-global) pixelBuffer, timestamps the frame from the host
 * clock, and submits it to ICMCompressionSessionEncodeFrame.
 * Returns 0 on success (or when recording is not ready), -1 on error. */
static int quicktimedrv_record(screenshot_t *screenshot)
{
    if (!video_ready) {
        return 0;
    }
    OSErr theError;

    // lock buffer
    theError = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error locking pixel buffer!");
        return -1;
    }

    // fill frame
    unsigned char *buffer = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);

    unsigned int line_size = screenshot->draw_buffer_line_size;
    int h = screenshot->height;
    int w = screenshot->width;
    int xoff = screenshot->x_offset;
    int yoff = screenshot->y_offset;
    BYTE *srcBuffer = screenshot->draw_buffer;

    // move to last line in tgt buffer and to first in source
    // (video_xoff is in pixels; the target is 3 bytes/pixel RGB)
    buffer += (video_yoff) * bytesPerRow + video_xoff * 3;
    srcBuffer += yoff * line_size + xoff;

    // Expand each palette index to its RGB triple, row by row.
    int x, y;
    for (y = 0; y < h; y++) {
        int pix = 0;
        for (x = 0; x < w; x++) {
            BYTE val = srcBuffer[x];
            buffer[pix++] = screenshot->palette->entries[val].red;
            buffer[pix++] = screenshot->palette->entries[val].green;
            buffer[pix++] = screenshot->palette->entries[val].blue;
        }
        buffer += bytesPerRow;
        srcBuffer += line_size;
    }

    // unlock buffer
    theError = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error unlocking pixel buffer!");
        return -1;
    }

    // Frame duration = host-clock delta since the previous frame.
    TimeValue64 next = CVGetCurrentHostTime() / divider;
    TimeValue64 duration = next - timestamp;
    timestamp = next;

    // encode frame
    theError = ICMCompressionSessionEncodeFrame(videoCompressionSession,
                                                pixelBuffer,
                                                timestamp, duration,
                                                kICMValidTime_DisplayTimeStampIsValid |
                                                kICMValidTime_DisplayDurationIsValid,
                                                NULL, NULL, (void *)NULL);
    if (theError) {
        log_debug("quicktime: error encoding frame!");
        return -1;
    }

    return 0;
}
/* Wrap a CoreVideo buffer in a GstBuffer without copying: each plane of the
 * (locked) CVPixelBuffer is appended as wrapped memory, and a GstCoreVideoMeta
 * keeps the underlying buffer alive. Adds a GstVideoMeta when 'vinfo' is
 * provided. Returns NULL on failure (non-pixel buffer type, or lock failure). */
GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    /* Bug fix: 'vinfo' may legitimately be NULL (it is NULL-checked below
     * before adding the video meta), but this branch dereferenced
     * vinfo->height unconditionally. Take the height from the pixel buffer
     * itself instead. */
    size = stride[0] * CVPixelBufferGetHeight (pixbuf);

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    /* Return value intentionally unused; the meta is attached to 'buf'. */
    gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        vinfo->finfo->format, vinfo->width, vinfo->height,
        n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
/* Consume a decoded CoreVideo frame for the jit.gl.hap object. For Hap
 * (DXT-compressed) pixel buffers, validates dimensions/format and computes
 * the upload geometry; for plain RGB/BGRA pixel buffers, sets up an
 * uncompressed rectangle-texture upload; for non-pixel-buffer frames (Mac
 * OpenGL textures), adopts the texture directly. Sets x->validframe when the
 * frame is ready to draw. Frames arriving while a previous frame is still
 * pending (x->validframe set) are ignored. */
void jit_gl_hap_draw_frame(void *jitob, CVImageBufferRef frame)
{
	t_jit_gl_hap * x = (t_jit_gl_hap*)jitob;
	CFTypeID imageType = CFGetTypeID(frame);
	OSType newPixelFormat;

	/* Previous frame not consumed yet; drop this one. */
	if(x->validframe)
		return;

	if (imageType == CVPixelBufferGetTypeID()) {
		// Update the texture
		/* Take ownership of the new buffer; unlock and drop the old one. */
		CVBufferRetain(frame);
		if(x->buffer) {
			CVPixelBufferUnlockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);
			CVBufferRelease(x->buffer);
		}

		x->buffer = frame;
		/* Buffer stays locked until it is replaced by the next frame. */
		CVPixelBufferLockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);

		x->dim[0] = CVPixelBufferGetWidth(x->buffer);
		x->dim[1] = CVPixelBufferGetHeight(x->buffer);
		newPixelFormat = CVPixelBufferGetPixelFormatType(x->buffer);

		if(x->buffer && x->hap_format==JIT_GL_HAP_PF_HAP) {
			size_t extraRight, extraBottom;
			unsigned int bitsPerPixel;
			size_t bytesPerRow;
			size_t actualBufferSize;

			/* DXT data is padded out to the extended (block-aligned) size. */
			CVPixelBufferGetExtendedPixels(x->buffer, NULL, &extraRight, NULL, &extraBottom);
			x->roundedWidth = x->dim[0] + extraRight;
			x->roundedHeight = x->dim[1] + extraBottom;
			/* DXT blocks are 4x4; reject anything not block-aligned. */
			if (x->roundedWidth % 4 != 0 || x->roundedHeight % 4 != 0) {
				x->validframe = 0;
				return;
			}

			switch (newPixelFormat) {
				case kHapPixelFormatTypeRGB_DXT1:
					x->newInternalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
					bitsPerPixel = 4;
					break;
				case kHapPixelFormatTypeRGBA_DXT5:
				case kHapPixelFormatTypeYCoCg_DXT5:
					x->newInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
					bitsPerPixel = 8;
					break;
				default:
					// we don't support non-DXT pixel buffers
					x->validframe = 0;
					return;
					break;
			}
			/* YCoCg needs a shader pass to convert back to RGB. */
			x->useshader = (newPixelFormat == kHapPixelFormatTypeYCoCg_DXT5);
			bytesPerRow = (x->roundedWidth * bitsPerPixel) / 8;
			x->newDataLength = bytesPerRow * x->roundedHeight; // usually not the full length of the buffer
			actualBufferSize = CVPixelBufferGetDataSize(x->buffer);

			// Check the buffer is as large as we expect it to be
			if (x->newDataLength > actualBufferSize) {
				x->validframe = 0;
				return;
			}

			// If we got this far we're good to go
			x->validframe = 1;
			x->target = GL_TEXTURE_2D;
			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			//x->drawhap = 1;
		}
		else if(x->buffer) {// && x->hap_format==JIT_GL_HAP_PF_HAP) {
			/* Uncompressed path: plain RGB / BGRA pixel buffers. */
			if( newPixelFormat == k24RGBPixelFormat )
				x->newInternalFormat = GL_RGB8;
			else if( newPixelFormat == k32BGRAPixelFormat )
				x->newInternalFormat = GL_RGBA8;
			else {
				x->validframe = 0;
				return;
			}
			x->roundedWidth = x->dim[0];
			x->roundedHeight = x->dim[1];
			x->newDataLength = CVPixelBufferGetDataSize(x->buffer);
			/* Row length in pixels: stride divided by bytes-per-pixel. */
			x->rowLength = CVPixelBufferGetBytesPerRow( x->buffer ) / (x->hap_format==JIT_GL_HAP_PF_RGB ? 3 : 4);
			x->target = GL_TEXTURE_RECTANGLE_EXT;
			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			x->validframe = 1;
		}
	}
	else {
#ifdef MAC_VERSION
		/* Frame is already an OpenGL texture; adopt it directly. */
		CGSize imageSize = CVImageBufferGetEncodedSize(frame);
		bool flipped = CVOpenGLTextureIsFlipped(frame);
		x->texture = CVOpenGLTextureGetName(frame);
		x->useshader = 0;
		x->dim[0] = (t_atom_long)imageSize.width;
		x->dim[1] = (t_atom_long)imageSize.height;
		x->validframe = 1;
		x->target = GL_TEXTURE_RECTANGLE_ARB;
		if(x->flipped!=flipped) {
			jit_attr_setlong(x->texoutput, gensym("flip"), flipped);
			x->flipped = flipped;
		}
#endif
	}
}
/* Wrap a CMSampleBuffer in a GstBuffer without copying. Prefers the sample's
 * image (pixel) buffer; falls back to its block buffer for non-video data.
 * A GstCoreMediaMeta on the returned buffer retains the sample buffer.
 * Returns NULL on failure.
 * NOTE(review): for planar pixel buffers the wrapped memory starts at the
 * base address of plane 0 but spans the summed size of all planes — this
 * assumes the planes are laid out contiguously in memory; confirm for the
 * pixel formats in use. */
GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf)
{
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  gchar *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL &&
      CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    if (CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      data = CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      /* Total size = sum of stride * height over all planes. */
      size = 0;
      plane_count = CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx) *
            CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = CVPixelBufferGetBaseAddress (pixel_buf);
      size = CVPixelBufferGetBytesPerRow (pixel_buf) *
          CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    /* Non-pixel payload: expose the block buffer's contiguous data range. */
    status = CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  /* The meta retains the sample buffer so the wrapped memory stays valid. */
  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  CVBufferRetain ((CVBufferRef)sample_buf);
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}
// Returns the number of bytes per row (stride) of the wrapped CoreVideo
// pixel buffer.
size_t QTPixelBuffer::bytesPerRow() const
{
    const size_t stride = CVPixelBufferGetBytesPerRow(m_pixelBuffer);
    return stride;
}
break; case kCVPixelFormatType_420YpCbCr8Planar: data->type = IMGFMT_420P; break; default: _Error("Not supported format."); data->type = IMGFMT_NONE; } auto desc = mp_imgfmt_get_desc(data->type); if (CVPixelBufferIsPlanar(buffer)) { data->planes = CVPixelBufferGetPlaneCount(buffer); Q_ASSERT(data->planes == desc.num_planes); for (int i=0; i<data->planes; ++i) { data->alignedByteSize[i].rwidth() = CVPixelBufferGetBytesPerRowOfPlane(buffer, i); data->alignedByteSize[i].rheight() = CVPixelBufferGetHeightOfPlane(buffer, i); data->bpp += desc.bpp[i] >> (desc.xs[i] + desc.ys[i]); } } else { data->planes = 1; data->alignedByteSize[0].rwidth() = CVPixelBufferGetBytesPerRow(buffer); data->alignedByteSize[0].rheight() = CVPixelBufferGetHeight(buffer); data->bpp = desc.bpp[0]; } data->alignedSize = data->alignedByteSize[0]; data->alignedSize.rwidth() /= desc.bytes[0]; if (::directRendering(data->type)) data->colorspace = MP_CSP_RGB; } #endif
/* Wrap the planes of a CVPixelBuffer into 'buf' as zero-copy memories (when
 * 'map' is TRUE, which also locks the buffer) and attach a GstVideoMeta
 * describing the actual strides/offsets. Sets *has_padding when any plane's
 * stride differs from the stride expected by 'info'.
 * Returns FALSE if the buffer could not be locked, TRUE otherwise. */
static gboolean
gst_core_media_buffer_wrap_pixel_buffer (GstBuffer * buf, GstVideoInfo * info,
    CVPixelBufferRef pixel_buf, gboolean * has_padding, gboolean map)
{
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES] = { 0 };
  gint stride[GST_VIDEO_MAX_PLANES] = { 0 };
  UInt32 size;

  if (map && CVPixelBufferLockBaseAddress (pixel_buf, 0) != kCVReturnSuccess) {
    GST_ERROR ("Could not lock pixel buffer base address");
    return FALSE;
  }

  *has_padding = FALSE;

  if (CVPixelBufferIsPlanar (pixel_buf)) {
    /* Cleanup: the inner 'size' shadowed the function-scope 'size'; use the
     * outer variable for both branches. */
    gint i, plane_offset = 0;

    n_planes = CVPixelBufferGetPlaneCount (pixel_buf);
    for (i = 0; i < n_planes; i++) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, i);

      if (stride[i] != GST_VIDEO_INFO_PLANE_STRIDE (info, i)) {
        *has_padding = TRUE;
      }

      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixel_buf, i);
      offset[i] = plane_offset;
      plane_offset += size;

      if (map) {
        gst_buffer_append_memory (buf,
            gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
                CVPixelBufferGetBaseAddressOfPlane (pixel_buf, i), size, 0,
                size, NULL, NULL));
      }
    }
  } else {
    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixel_buf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixel_buf);

    if (map) {
      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddress (pixel_buf), size, 0, size, NULL,
              NULL));
    }
  }

  /* Cleanup: the returned meta pointer was stored in an otherwise-unused
   * local; the meta is attached to 'buf', so discard the return value. */
  gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_INFO_FORMAT (info), info->width, info->height, n_planes,
      offset, stride);

  return TRUE;
}
/* Copy a VideoToolbox hardware frame (CVPixelBuffer in frame->data[3]) into
 * a normal software AVFrame, then move that back into 'frame'.
 * Supported source formats: planar 4:2:0, 2vuy, BGRA and (on 10.7+) NV12.
 * Returns 0 on success or a negative AVERROR. */
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    /* Map the CoreVideo pixel format onto the matching FFmpeg pixel format. */
    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    /* Unlock before the early return so the lock is balanced on all paths. */
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}