// Creates a raw YV12 image from the source frame.
static ComponentResult convertColorSpace(VP8EncoderGlobals glob, ICMCompressorSourceFrameRef sourceFrame)
{
  CVPixelBufferRef sourcePixelBuffer = NULL;
  sourcePixelBuffer = ICMCompressorSourceFrameGetPixelBuffer(sourceFrame);
  CVPixelBufferLockBaseAddress(sourcePixelBuffer, 0);
  //copy our frame to the raw image. TODO: row padding is not handled here (see the sketch after this function).
  unsigned char *srcBytes = CVPixelBufferGetBaseAddress(sourcePixelBuffer);
  dbg_printf("[vp8e - %08lx] CVPixelBufferGetBaseAddress %x\n", (UInt32)glob, sourcePixelBuffer);
  dbg_printf("[vp8e - %08lx] CopyChunkyYUV422ToPlanarYV12 %dx%d, %x, %d, %x, %d, %x, %d, %x, %d \n", (UInt32)glob,
             glob->width, glob->height,
             srcBytes,
             (int)CVPixelBufferGetBytesPerRow(sourcePixelBuffer),
             glob->raw->planes[PLANE_Y],
             glob->raw->stride[PLANE_Y],
             glob->raw->planes[PLANE_U],
             glob->raw->stride[PLANE_U],
             glob->raw->planes[PLANE_V],
             glob->raw->stride[PLANE_V]);
  ComponentResult err = CopyChunkyYUV422ToPlanarYV12(glob->width, glob->height,
                                                     srcBytes,
                                                     CVPixelBufferGetBytesPerRow(sourcePixelBuffer),
                                                     glob->raw->planes[PLANE_Y],
                                                     glob->raw->stride[PLANE_Y],
                                                     glob->raw->planes[PLANE_U],
                                                     glob->raw->stride[PLANE_U],
                                                     glob->raw->planes[PLANE_V],
                                                     glob->raw->stride[PLANE_V]);

  CVPixelBufferUnlockBaseAddress(sourcePixelBuffer, 0);
  dbg_printf("[vp8e - %08lx]  CVPixelBufferUnlockBaseAddress %x\n", sourcePixelBuffer);

  return err;
}
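
The TODO in convertColorSpace glosses over row padding: CVPixelBuffer rows are frequently padded beyond width * bytes-per-pixel, so copying without consulting CVPixelBufferGetBytesPerRow() can shear the image. A minimal sketch of a padding-aware copy for a packed 4:2:2 source, assuming a hypothetical destination (dst, dstStride) and a buffer that is already locked:

// Hedged sketch: copy row by row, honoring the source's bytes-per-row so any
// trailing padding in each row is skipped rather than copied.
static void copyPackedRowsIgnoringPadding(CVPixelBufferRef pixbuf,
                                          unsigned char *dst, size_t dstStride)
{
  unsigned char *src = CVPixelBufferGetBaseAddress(pixbuf); // valid only while locked
  size_t srcStride = CVPixelBufferGetBytesPerRow(pixbuf);
  size_t height = CVPixelBufferGetHeight(pixbuf);
  size_t rowBytes = CVPixelBufferGetWidth(pixbuf) * 2; // 2 bytes/pixel for packed YUV 4:2:2
  for (size_t y = 0; y < height; y++)
    memcpy(dst + y * dstStride, src + y * srcStride, rowBytes);
}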
Example #2
void
dump_cvpixel_buffer (CVPixelBufferRef pixbuf)
{
  gsize left, right, top, bottom;

  GST_LOG ("buffer %p", pixbuf);
  if (CVPixelBufferLockBaseAddress (pixbuf, 0)) {
    GST_WARNING ("Couldn't lock base adress on pixel buffer !");
    return;
  }
  GST_LOG ("Width:%" G_GSIZE_FORMAT " , Height:%" G_GSIZE_FORMAT,
      CVPixelBufferGetWidth (pixbuf), CVPixelBufferGetHeight (pixbuf));
  GST_LOG ("Format:%" GST_FOURCC_FORMAT,
      GST_FOURCC_ARGS (CVPixelBufferGetPixelFormatType (pixbuf)));
  GST_LOG ("base address:%p", CVPixelBufferGetBaseAddress (pixbuf));
  GST_LOG ("Bytes per row:%" G_GSIZE_FORMAT,
      CVPixelBufferGetBytesPerRow (pixbuf));
  GST_LOG ("Data Size:%" G_GSIZE_FORMAT, CVPixelBufferGetDataSize (pixbuf));
  GST_LOG ("Plane count:%" G_GSIZE_FORMAT, CVPixelBufferGetPlaneCount (pixbuf));
  CVPixelBufferGetExtendedPixels (pixbuf, &left, &right, &top, &bottom);
  GST_LOG ("Extended pixels. left/right/top/bottom : %" G_GSIZE_FORMAT
      "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT,
      left, right, top, bottom);
  CVPixelBufferUnlockBaseAddress (pixbuf, 0);
}
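
dump_cvpixel_buffer only logs the top-level base address and stride, which are meaningful for non-planar buffers; for planar ones the per-plane accessors describe the actual layout. A possible extension, sketched on the assumption that it runs inside the same lock and GStreamer logging context:

/* Hedged sketch: log each plane of a planar pixel buffer. */
if (CVPixelBufferIsPlanar (pixbuf)) {
  gsize i, count = CVPixelBufferGetPlaneCount (pixbuf);
  for (i = 0; i < count; i++) {
    GST_LOG ("Plane %" G_GSIZE_FORMAT ": base %p, bytes per row %"
        G_GSIZE_FORMAT ", %" G_GSIZE_FORMAT "x%" G_GSIZE_FORMAT, i,
        CVPixelBufferGetBaseAddressOfPlane (pixbuf, i),
        CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i),
        CVPixelBufferGetWidthOfPlane (pixbuf, i),
        CVPixelBufferGetHeightOfPlane (pixbuf, i));
  }
}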
Example #3
static int vdadec_decode(AVCodecContext *avctx,
        void *data, int *got_frame, AVPacket *avpkt)
{
    VDADecoderContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    int ret;

    set_context(avctx);
    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
    restore_context(avctx);
    if (*got_frame) {
        AVBufferRef *buffer = pic->buf[0];
        VDABufferContext *context = av_buffer_get_opaque(buffer);
        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];

        CVPixelBufferRetain(cv_buffer);
        CVPixelBufferLockBaseAddress(cv_buffer, 0);
        context->cv_buffer = cv_buffer;
        pic->format = ctx->pix_fmt;
        if (CVPixelBufferIsPlanar(cv_buffer)) {
            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
            av_assert0(count < 4);
            for (i = 0; i < count; i++) {
                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
            }
        } else {
            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
        }
    }
    avctx->pix_fmt = ctx->pix_fmt;

    return ret;
}
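
Note that vdadec_decode retains and locks cv_buffer without releasing either here; the balance presumably lives in the free callback of pic->buf[0]. A hedged sketch of what that callback could look like, assuming the AVBufferRef was created with av_buffer_create() using a VDABufferContext as its opaque and an av_malloc()ed payload:

/* Hedged sketch: undo the retain + lock taken in vdadec_decode when the
 * owning AVBufferRef is finally freed. */
static void vda_buffer_free(void *opaque, uint8_t *data)
{
    VDABufferContext *context = opaque;
    if (context->cv_buffer) {
        CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
        CVPixelBufferRelease(context->cv_buffer);
    }
    av_free(data);
}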
Example #4
OSErr qQuickTimeDecoderCallback(void *decompressionTrackingRefCon,
								OSStatus result,
								ICMDecompressionTrackingFlags decompressionTrackingFlags,
								CVPixelBufferRef pixelBuffer,
								TimeValue64 displayTime,
								TimeValue64 displayDuration,
								ICMValidTimeFlags validTimeFlags,
								void *reserved,
								void *sourceFrameRefCon)
{
	OSStatus err;

	// The decompressionTrackingRefCon might actually be a QCamera or a QDecoder, but we are
	// careful to ensure that they begin with the same layout as QDecoderCallbackData.
	QDecoder* decoder = (QDecoder*)decompressionTrackingRefCon;

	// Declare up here because we need to compile on archaic GCC on Win32
	void* base;
	size_t width;
	size_t height;
	size_t size;

//	fprintf(QSTDERR, "\n\tdecode %d ", decoder->outFrameCount);
	
	if (!pixelBuffer) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no pixel buffer (why?)");
		return noErr;
	}
	if (!(kICMDecompressionTracking_EmittingFrame & decompressionTrackingFlags)) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no frame emitted (why?)");	
		return noErr;
	}

	decoder->outFrameCount++;
	
	// Lock the pixel-buffer until we're done with it.
	err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
	if (err != noErr) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): can't lock CVPixelBuffer");
		// XXXX: so what do we do about it?
		return err;
	}
	// Get info about the raw pixel-buffer data.
	base = (void*)CVPixelBufferGetBaseAddress(pixelBuffer);
	width = CVPixelBufferGetWidth(pixelBuffer);
	height = CVPixelBufferGetHeight(pixelBuffer);
//	size = width*height*4;
	size = height * CVPixelBufferGetBytesPerRow(pixelBuffer);
	
	// Stash the data so that Squeak can retrieve it.
	qStoreCallbackData(base, &(decoder->callbackData), size);

	// We're done with the pixel-buffer
	CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
	
	// Signal the semaphore so that Squeak can grab the data that we just stashed.
	interpreterProxy->signalSemaphoreWithIndex(decoder->semaIndex);
	
	return noErr;
}
Example #5
static gpointer
gst_apple_core_video_mem_map (GstMemory * gmem, gsize maxsize,
    GstMapFlags flags)
{
  GstAppleCoreVideoMemory *mem = (GstAppleCoreVideoMemory *) gmem;
  gpointer ret;

  if (!gst_apple_core_video_pixel_buffer_lock (mem->gpixbuf, flags))
    return NULL;

  if (CVPixelBufferIsPlanar (mem->gpixbuf->buf)) {
    ret = CVPixelBufferGetBaseAddressOfPlane (mem->gpixbuf->buf, mem->plane);

    if (ret != NULL)
      GST_DEBUG ("%p: pixbuf %p plane %" G_GSIZE_FORMAT
          " flags %08x: mapped %p", mem, mem->gpixbuf->buf, mem->plane, flags,
          ret);
    else
      GST_ERROR ("%p: invalid plane base address (NULL) for pixbuf %p plane %"
          G_GSIZE_FORMAT, mem, mem->gpixbuf->buf, mem->plane);
  } else {
    ret = CVPixelBufferGetBaseAddress (mem->gpixbuf->buf);

    if (ret != NULL)
      GST_DEBUG ("%p: pixbuf %p flags %08x: mapped %p", mem, mem->gpixbuf->buf,
          flags, ret);
    else
      GST_ERROR ("%p: invalid base address (NULL) for pixbuf %p"
          G_GSIZE_FORMAT, mem, mem->gpixbuf->buf);
  }

  return ret;
}
Example #6
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    /* Balance the earlier lock once the pixel data has been copied out. */
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
Example #7
int qEncodeAPI(QEncoder* encoder, char* bytes, int byteSize)
{
    OSErr err;
    CVPixelBufferPoolRef pixelBufferPool;
    CVPixelBufferRef pixelBuffer;
    unsigned char* baseAddress;
    size_t bufferSize;

    // Grab a pixel buffer from the pool (ICMCompressionSessionEncodeFrame() needs the input
    // data to be passed in as a CVPixelBufferRef).
    pixelBufferPool = ICMCompressionSessionGetPixelBufferPool(encoder->session);
    err = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBuffer);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not obtain a pixel buffer from pool");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        return -5;
    }

    // Lock the pixel-buffer so that we can copy our data into it for encoding
    // XXXX: would be nice to avoid this copy.
    err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not lock the pixel buffer");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }
    baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
//	bufferSize = CVPixelBufferGetWidth(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer) * 4;
    bufferSize = CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer);

    // XXXX: for now, just for debugging.  For production, we should notice if this happens and deal with it "appropriately".
    if ((size_t)byteSize != bufferSize) {
        fprintf(QSTDERR, "\nqEncodeQT(): input data size (%d) does not match pixel-buffer data size (%lu)", byteSize, (unsigned long)bufferSize);
    }

    // Copy the data and unlock the buffer
    memcpy(baseAddress, bytes, bufferSize);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // Encode the frame (now in pixel-buffer form).
    err = ICMCompressionSessionEncodeFrame(	encoder->session,
                                            pixelBuffer,
                                            0, 0, 0, // we're not specifying a frame time
                                            NULL,
                                            NULL,
                                            NULL);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not encode the frame");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }

    CVPixelBufferRelease(pixelBuffer);
    return 0;
}
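
The "XXXX: would be nice to avoid this copy" note has a plausible answer: wrap the caller's bytes in a CVPixelBuffer directly instead of copying into a pooled buffer. A hedged sketch using CVPixelBufferCreateWithBytes(), where the width, height, and 32-bit ARGB format are hypothetical stand-ins for the encoder's real configuration (and the compressor may still prefer its own pooled buffers):

/* Hedged sketch: create a no-copy CVPixelBuffer over existing pixel data.
 * The caller must keep `bytes` alive for the lifetime of the buffer, since
 * no release callback is registered here. */
static CVPixelBufferRef qWrapBytesNoCopy(void* bytes, size_t width, size_t height)
{
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn err = CVPixelBufferCreateWithBytes(kCFAllocatorDefault,
                                                width, height,
                                                k32ARGBPixelFormat,
                                                bytes, width * 4,
                                                NULL, NULL, /* no release callback */
                                                NULL, &pixelBuffer);
    return (err == kCVReturnSuccess) ? pixelBuffer : NULL;
}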
Example #8
size_t QTPixelBuffer::dataProviderGetBytesAtPositionCallback(void* refcon, void* buffer, size_t position, size_t count)
{
    char* data = (char*)CVPixelBufferGetBaseAddress(static_cast<CVPixelBufferRef>(refcon));
    size_t size = CVPixelBufferGetDataSize(static_cast<CVPixelBufferRef>(refcon));
    if (position >= size)
        return 0; // avoid unsigned underflow when asked to read past the end
    if (size - position < count)
        count = size - position;

    memcpy(buffer, data+position, count);
    return count;
}
Example #9
void jit_gl_hap_submit_nonhap_texture(t_jit_gl_hap *x)
{
	GLenum type = GL_UNSIGNED_BYTE;
	GLenum format = (x->hap_format==JIT_GL_HAP_PF_RGB ? GL_RGB : GL_RGBA);
	GLvoid *baseAddress = CVPixelBufferGetBaseAddress(x->buffer);

	glPushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT);
	glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT);
		
	// Create a new texture if our current one isn't adequate
	if (
		!x->texture ||
		(x->dim[0] > x->backingWidth) ||
		(x->dim[1] > x->backingHeight) ||
		(x->newInternalFormat != x->internalFormat)
	) {
		
		glEnable(x->target);
		
		if (x->texture != 0) {
			glDeleteTextures(1, &x->texture);
		}
		glGenTextures(1, &x->texture);
		glBindTexture(x->target, x->texture);
		x->deletetex = 1;
		
		x->backingWidth = x->dim[0];
		x->backingHeight = x->dim[1];
		
		glTexParameteri(x->target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameteri(x->target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(x->target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(x->target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);		

		glTexImage2D(x->target, 0, x->newInternalFormat, x->backingWidth, x->backingHeight, 0, format, type, NULL);
		x->internalFormat = x->newInternalFormat;
	}
	else {
		glBindTexture(x->target, x->texture);
	}

	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	glPixelStorei(GL_UNPACK_ROW_LENGTH, x->rowLength);
	glTexSubImage2D(x->target, 0,0,0, x->roundedWidth, x->roundedHeight, format, type, baseAddress);

	glPopClientAttrib();
	glPopAttrib();

}
Example #10
static void
VDADecoderCallback (void *decompressionOutputRefCon, CFDictionaryRef frameInfo, OSStatus status, uint32_t infoFlags, CVImageBufferRef imageBuffer)
{
	MoonVDADecoder *decoder = (MoonVDADecoder *) decompressionOutputRefCon;
	VideoStream *vs = (VideoStream *) decoder->GetStream ();

	// FIXME: Is this always one thread? Can we optimize this?
	decoder->GetDeployment ()->RegisterThread ();

	Deployment::SetCurrent (decoder->GetDeployment ());

	if (imageBuffer == NULL) {
		return;
	}

	OSType format_type = CVPixelBufferGetPixelFormatType (imageBuffer);
	if (format_type != kCVPixelFormatType_422YpCbCr8) {
		g_warning ("Mismatched format in VDA");
		return;
	}

	MediaFrame *mf = (MediaFrame *) CFDictionaryGetValue (frameInfo, CFSTR ("MoonMediaFrame"));

	mf->AddState (MediaFrameVUY2);
	mf->FreeBuffer ();
	mf->SetBufLen (0);

	mf->srcSlideY = 0;
	mf->srcSlideH = vs->GetHeight ();

	mf->width = vs->GetWidth ();
	mf->height = vs->GetHeight ();

	CVPixelBufferLockBaseAddress (imageBuffer, 0);

	mf->data_stride [0] = (uint8_t *) CVPixelBufferGetBaseAddress (imageBuffer);
	mf->srcStride [0] = CVPixelBufferGetBytesPerRow (imageBuffer);

	mf->AddState (MediaFrameDecoded);

	mf->decoder_specific_data = imageBuffer;
	CVPixelBufferRetain (imageBuffer);

	decoder->ReportDecodeFrameCompleted (mf);

	mf->unref ();
}
Example #11
 void
 PixelBufferSource::pushPixelBuffer(void *data, size_t size)
 {
     
     auto outp = m_output.lock();
     
     if(outp) {
         CVPixelBufferLockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         // The base address is only valid while the buffer is locked.
         void* loc = CVPixelBufferGetBaseAddress((CVPixelBufferRef)m_pixelBuffer);
         memcpy(loc, data, size);
         CVPixelBufferUnlockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         
         VideoBufferMetadata md(0.);
         md.setData(kLayerGame, shared_from_this());
         
         outp->pushBuffer((const uint8_t*)m_pixelBuffer, sizeof(CVPixelBufferRef), md);
     }
 }
Example #12
Surface8uRef convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED; // avoid reading sco uninitialized for unrecognized formats
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface8u *newSurface = new Surface8u( ptr, width, height, rowBytes, sco );
	return Surface8uRef( newSurface, [=] ( Surface8u *s ) { ::CVBufferRelease( pixelBufferRef ); delete s; } );
}
Example #13
Surface8u convertCVPixelBufferToSurface( CVPixelBufferRef pixelBufferRef )
{
	CVPixelBufferLockBaseAddress( pixelBufferRef, 0 );
	uint8_t *ptr = reinterpret_cast<uint8_t*>( CVPixelBufferGetBaseAddress( pixelBufferRef ) );
	int32_t rowBytes = CVPixelBufferGetBytesPerRow( pixelBufferRef );
	OSType type = CVPixelBufferGetPixelFormatType( pixelBufferRef );
	size_t width = CVPixelBufferGetWidth( pixelBufferRef );
	size_t height = CVPixelBufferGetHeight( pixelBufferRef );
	SurfaceChannelOrder sco = SurfaceChannelOrder::UNSPECIFIED; // avoid reading sco uninitialized for unrecognized formats
	if( type == k24RGBPixelFormat )
		sco = SurfaceChannelOrder::RGB;
	else if( type == k32ARGBPixelFormat )
		sco = SurfaceChannelOrder::ARGB;
	else if( type == k24BGRPixelFormat )
		sco = SurfaceChannelOrder::BGR;
	else if( type == k32BGRAPixelFormat )
		sco = SurfaceChannelOrder::BGRA;
	Surface result( ptr, width, height, rowBytes, sco );
	result.setDeallocator( CVPixelBufferDealloc, pixelBufferRef );
	return result;
}
Example #14
	virtual void*data()
	{
		if (m_ref == NULL)	return NULL;
		
		if (!m_locked)
			CVPixelBufferLockBaseAddress(m_ref, 0);
		
		m_locked = true;
		
		unsigned char*data = (unsigned char*)CVPixelBufferGetBaseAddress(m_ref);
		Coord2DI s = size();
		
//		int i,j;
//		
//		minB = 255;
//		minG = 255;
//		minR = 255;
//		
//		maxG = 0;
//		maxB = 0;
//		maxR = 0;
//		
//		
//		for (j=0; j<s.y; j+=64)
//		{
//			for (i=0; i<s.x*4; i+=4)
//			{
//				int k = i + j*s.x*4;
//				if (data[k] < minB)	minB = data[k];
//				if (data[k+1] < minG)	minG = data[k+1];
//				if (data[k+2] < minR)	minR = data[k+2];
//				
//				if (data[k] > maxB)	maxB = data[k];
//				if (data[k+1] > maxG)	maxG = data[k+1];
//				if (data[k+2] > maxR)	maxR = data[k+2];
//			}
//		}
		
		return (void*)data;
	}
Example #15
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}
Example #16
static int quicktimedrv_record(screenshot_t *screenshot)
{
    if (!video_ready) {
        return 0;
    }

    OSErr theError;

    // lock buffer
    theError = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error locking pixel buffer!");
        return -1;
    }

    // fill frame
    unsigned char *buffer = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);

    unsigned int line_size = screenshot->draw_buffer_line_size;
    int h = screenshot->height;
    int w = screenshot->width;
    int xoff = screenshot->x_offset;
    int yoff = screenshot->y_offset;
    BYTE *srcBuffer = screenshot->draw_buffer;

    // move to last line in tgt buffer and to first in source
    buffer += (video_yoff) * bytesPerRow + video_xoff * 3;
    srcBuffer += yoff * line_size + xoff;

    int x, y;
    for (y = 0; y < h; y++) {
        int pix = 0;
        for (x = 0; x < w; x++) {
            BYTE val = srcBuffer[x];
            buffer[pix++] = screenshot->palette->entries[val].red;
            buffer[pix++] = screenshot->palette->entries[val].green;
            buffer[pix++] = screenshot->palette->entries[val].blue;
        }
        buffer += bytesPerRow;
        srcBuffer += line_size;
    }

    // unlock buffer
    theError = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error unlocking pixel buffer!");
        return -1;
    }

    TimeValue64 next = CVGetCurrentHostTime() / divider;
    TimeValue64 duration = next - timestamp;
    timestamp = next;

    // encode frame
    theError = ICMCompressionSessionEncodeFrame(videoCompressionSession,
                                                pixelBuffer,
                                                timestamp, duration,
                                                kICMValidTime_DisplayTimeStampIsValid |
                                                kICMValidTime_DisplayDurationIsValid,
                                                NULL, NULL, (void *)NULL);
    if (theError) {
        log_debug("quicktime: error encoding frame!");
        return -1;
    }

    return 0;
}
Example #17
GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixbuf); /* vinfo may be NULL (checked below) */

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    GstVideoMeta *video_meta;

    video_meta =
        gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        vinfo->finfo->format, vinfo->width, vinfo->height,
        n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
Example #18
void jit_gl_hap_submit_texture(t_jit_gl_hap *x)
{
	GLenum type;
	GLvoid *baseAddress = CVPixelBufferGetBaseAddress(x->buffer);
	
	glPushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT);
	glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT);
		
	// Create a new texture if our current one isn't adequate
	if (
		!x->texture ||
		(x->roundedWidth > x->backingWidth) ||
		(x->roundedHeight > x->backingHeight) ||
		(x->newInternalFormat != x->internalFormat)
	) {
		glEnable(x->target);
		
		if (x->texture != 0) {
			glDeleteTextures(1, &x->texture);
		}
		glGenTextures(1, &x->texture);
		glBindTexture(x->target, x->texture);
		x->deletetex = 1;
		
		// On NVIDIA hardware there is a massive slowdown if DXT textures aren't POT-dimensioned, so we use POT-dimensioned backing
		x->backingWidth = 1;
		while (x->backingWidth < x->roundedWidth) x->backingWidth <<= 1;
		x->backingHeight = 1;
		while (x->backingHeight < x->roundedHeight) x->backingHeight <<= 1;
		
		glTexParameteri(x->target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameteri(x->target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(x->target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(x->target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
#ifdef MAC_VERSION
		glTexParameteri(x->target, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
		type = GL_UNSIGNED_INT_8_8_8_8_REV;
#else
		type = GL_UNSIGNED_BYTE;
#endif

		// We allocate the texture with no pixel data, then use CompressedTexSubImage to update the content region			
		glTexImage2D(x->target, 0, x->newInternalFormat, x->backingWidth, x->backingHeight, 0, GL_BGRA, type, NULL);
		
		x->internalFormat = x->newInternalFormat;
	}
	else {
		glBindTexture(x->target, x->texture);
	}

#ifdef MAC_VERSION
	glTextureRangeAPPLE(GL_TEXTURE_2D, x->newDataLength, baseAddress);
	glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_TRUE);
#else
// not sure what this should be, so leaving as default for now
//	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
#endif

	glCompressedTexSubImage2D(GL_TEXTURE_2D,
							  0,
							  0,
							  0,
							  x->roundedWidth,
							  x->roundedHeight,
							  x->newInternalFormat,
							  x->newDataLength,
							  baseAddress);

	glPopClientAttrib();
	glPopAttrib();

}
Example #19
void* QTPixelBuffer::baseAddress()
{
    return CVPixelBufferGetBaseAddress(m_pixelBuffer);
}
Example #20
static gboolean
gst_core_media_buffer_wrap_pixel_buffer (GstBuffer * buf, GstVideoInfo * info,
    CVPixelBufferRef pixel_buf, gboolean * has_padding, gboolean map)
{
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES] = { 0 };
  gint stride[GST_VIDEO_MAX_PLANES] = { 0 };
  GstVideoMeta *video_meta;
  UInt32 size;

  if (map && CVPixelBufferLockBaseAddress (pixel_buf, 0) != kCVReturnSuccess) {
    GST_ERROR ("Could not lock pixel buffer base address");
    return FALSE;
  }

  *has_padding = FALSE;

  if (CVPixelBufferIsPlanar (pixel_buf)) {
    gint i, size = 0, plane_offset = 0;

    n_planes = CVPixelBufferGetPlaneCount (pixel_buf);
    for (i = 0; i < n_planes; i++) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, i);

      if (stride[i] != GST_VIDEO_INFO_PLANE_STRIDE (info, i)) {
        *has_padding = TRUE;
      }

      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixel_buf, i);
      offset[i] = plane_offset;
      plane_offset += size;

      if (map) {
        gst_buffer_append_memory (buf,
            gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
                CVPixelBufferGetBaseAddressOfPlane (pixel_buf, i), size, 0,
                size, NULL, NULL));
      }
    }
  } else {

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixel_buf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixel_buf);

    if (map) {
      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddress (pixel_buf), size, 0, size, NULL,
              NULL));
    }
  }

  video_meta =
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_INFO_FORMAT (info), info->width, info->height, n_planes, offset,
      stride);

  return TRUE;
}
Example #21
GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf)
{
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  gchar *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL &&
      CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    if (CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      data = CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      size = 0;
      plane_count = CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx) *
            CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = CVPixelBufferGetBaseAddress (pixel_buf);
      size = CVPixelBufferGetBytesPerRow (pixel_buf) *
          CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    status = CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  CFRetain (sample_buf); /* CMSampleBufferRef is a CFType, not a CVBuffer */
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}
Example #22
const void* QTPixelBuffer::dataProviderGetBytePointerCallback(void* refcon)
{
    CVPixelBufferLockBaseAddress(static_cast<CVPixelBufferRef>(refcon), 0);
    return CVPixelBufferGetBaseAddress(static_cast<CVPixelBufferRef>(refcon));
}
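
The getBytePointer callback above locks the buffer and leaves it locked; CGDataProviderDirectCallbacks pairs it with a releaseBytePointer hook where that lock should be dropped. The counterpart isn't shown in this listing, but a hedged sketch (the member name is assumed) would be:

/* Hedged sketch (assumed counterpart): balance the lock taken in
 * dataProviderGetBytePointerCallback once CoreGraphics is done with it. */
void QTPixelBuffer::dataProviderReleaseBytePointerCallback(void* refcon, const void* pointer)
{
    CVPixelBufferUnlockBaseAddress(static_cast<CVPixelBufferRef>(refcon), 0);
}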