Example #1
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // We can map the CV buffer addresses to a video frame in SurfaceInteropCVBuffer (may need VideoSurfaceInterop::mapToTexture()).
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // getting the base address results in an internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Example #2
void
dump_cvpixel_buffer (CVPixelBufferRef pixbuf)
{
  gsize left, right, top, bottom;

  GST_LOG ("buffer %p", pixbuf);
  if (CVPixelBufferLockBaseAddress (pixbuf, 0)) {
    GST_WARNING ("Couldn't lock base address on pixel buffer!");
    return;
  }
  GST_LOG ("Width:%" G_GSIZE_FORMAT " , Height:%" G_GSIZE_FORMAT,
      CVPixelBufferGetWidth (pixbuf), CVPixelBufferGetHeight (pixbuf));
  GST_LOG ("Format:%" GST_FOURCC_FORMAT,
      GST_FOURCC_ARGS (CVPixelBufferGetPixelFormatType (pixbuf)));
  GST_LOG ("base address:%p", CVPixelBufferGetBaseAddress (pixbuf));
  GST_LOG ("Bytes per row:%" G_GSIZE_FORMAT,
      CVPixelBufferGetBytesPerRow (pixbuf));
  GST_LOG ("Data Size:%" G_GSIZE_FORMAT, CVPixelBufferGetDataSize (pixbuf));
  GST_LOG ("Plane count:%" G_GSIZE_FORMAT, CVPixelBufferGetPlaneCount (pixbuf));
  CVPixelBufferGetExtendedPixels (pixbuf, &left, &right, &top, &bottom);
  GST_LOG ("Extended pixels. left/right/top/bottom : %" G_GSIZE_FORMAT
      "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT,
      left, right, top, bottom);
  CVPixelBufferUnlockBaseAddress (pixbuf, 0);
}
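
Nearly every example on this page shares the same skeleton: lock the base address, read the size/stride/base-address information, then unlock with the same flags. A minimal sketch of that pattern (the function name is illustrative and the read-only flag is one possible choice, not taken from any of the projects quoted here):

/* Minimal sketch of the shared lock/inspect/unlock pattern; assumes a
 * valid CVPixelBufferRef obtained elsewhere. */
#include <CoreVideo/CoreVideo.h>
#include <stdio.h>

static void inspect_pixel_buffer (CVPixelBufferRef pixbuf)
{
  /* Lock before dereferencing any base address. */
  if (CVPixelBufferLockBaseAddress (pixbuf, kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess)
    return;

  if (CVPixelBufferIsPlanar (pixbuf)) {
    size_t planes = CVPixelBufferGetPlaneCount (pixbuf);
    for (size_t i = 0; i < planes; i++)
      printf ("plane %zu: base %p, %zu bytes/row\n", i,
          CVPixelBufferGetBaseAddressOfPlane (pixbuf, i),
          CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i));
  } else {
    printf ("packed: base %p, %zu bytes/row\n",
        CVPixelBufferGetBaseAddress (pixbuf),
        CVPixelBufferGetBytesPerRow (pixbuf));
  }

  /* Unlock with the same flags that were used to lock. */
  CVPixelBufferUnlockBaseAddress (pixbuf, kCVPixelBufferLock_ReadOnly);
}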
Example #3
File: vda.c Project: AsamQi/vlc
/*****************************************************************************
 * vda_Copy422YpCbCr8: copy 2vuy CVPixelBuffer to picture_t
 *****************************************************************************/
static void vda_Copy422YpCbCr8( picture_t *p_pic,
                                CVPixelBufferRef buffer )
{
    int i_plane, i_line, i_dst_stride, i_src_stride;
    uint8_t *p_dst, *p_src;

    CVPixelBufferLockBaseAddress( buffer, 0 );

    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    {
        p_dst = p_pic->p[i_plane].p_pixels;
        p_src = CVPixelBufferGetBaseAddressOfPlane( buffer, i_plane );
        i_dst_stride  = p_pic->p[i_plane].i_pitch;
        i_src_stride  = CVPixelBufferGetBytesPerRowOfPlane( buffer, i_plane );

        for( i_line = 0; i_line < p_pic->p[i_plane].i_visible_lines ; i_line++ )
        {
            memcpy( p_dst, p_src, i_src_stride );

            p_src += i_src_stride;
            p_dst += i_dst_stride;
        }
    }

    CVPixelBufferUnlockBaseAddress( buffer, 0 );
}
Example #4
File: vda.c Project: AsamQi/vlc
/*****************************************************************************
 * vda_Copy420YpCbCr8Planar: copy y420 CVPixelBuffer to picture_t
 *****************************************************************************/
static void vda_Copy420YpCbCr8Planar( picture_t *p_pic,
                                      CVPixelBufferRef buffer,
                                      unsigned i_width,
                                      unsigned i_height,
                                      copy_cache_t *cache )
{
    uint8_t *pp_plane[3];
    size_t  pi_pitch[3];

    if (!buffer)
        return;

    CVPixelBufferLockBaseAddress( buffer, 0 );

    for( int i = 0; i < 3; i++ )
    {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane( buffer, i );
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane( buffer, i );
    }

    CopyFromYv12( p_pic, pp_plane, pi_pitch,
                  i_width, i_height, cache );

    CVPixelBufferUnlockBaseAddress( buffer, 0 );
}
Example #5
OSErr qQuickTimeDecoderCallback(void *decompressionTrackingRefCon,
								OSStatus result,
								ICMDecompressionTrackingFlags decompressionTrackingFlags,
								CVPixelBufferRef pixelBuffer,
								TimeValue64 displayTime,
								TimeValue64 displayDuration,
								ICMValidTimeFlags validTimeFlags,
								void *reserved,
								void *sourceFrameRefCon)
{
	OSStatus err;

	// The decompressionTrackingRefCon might actually be a QCamera or a QDecoder, but we are
	// careful to ensure that they begin with the same layout as QDecoderCallbackData.
	QDecoder* decoder = (QDecoder*)decompressionTrackingRefCon;

	// Declare up here because we need to compile on archaic GCC on Win32
	void* base;
	size_t width;
	size_t height;
	size_t size;

//	fprintf(QSTDERR, "\n\tdecode %d ", decoder->outFrameCount);
	
	if (!pixelBuffer) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no pixel buffer (why?)");
		return noErr;
	}
	if (!(kICMDecompressionTracking_EmittingFrame & decompressionTrackingFlags)) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): no frame emitted (why?)");	
		return noErr;
	}

	decoder->outFrameCount++;
	
	// Lock the pixel-buffer until we're done with it.
	err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
	if (err != noErr) {
		fprintf(QSTDERR, "\nqQuickTimeDecoderCallback(): can't lock CVPixelBuffer");
		// XXXX: so what do we do about it?
		return err;
	}
	// Get info about the raw pixel-buffer data.
	base = (void*)CVPixelBufferGetBaseAddress(pixelBuffer);
	width = CVPixelBufferGetWidth(pixelBuffer);
	height = CVPixelBufferGetHeight(pixelBuffer);
//	size = width*height*4;
	size = height * CVPixelBufferGetBytesPerRow(pixelBuffer);
	
	// Stash the data so that Squeak can retrieve it.
	qStoreCallbackData(base, &(decoder->callbackData), size);

	// We're done with the pixel-buffer
	CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
	
	// Signal the semaphore so that Squeak can grab the data that we just stashed.
	interpreterProxy->signalSemaphoreWithIndex(decoder->semaIndex);
	
	return noErr;
}
Example #6
static void release_buffer(void *opaque, uint8_t *data)
{
    VDABufferContext *context = opaque;
    CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
    CVPixelBufferRelease(context->cv_buffer);
    av_free(context);
}
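
Example #6 shows only the free callback; in FFmpeg's VDA glue such a callback is registered when the decoded frame's buffer is wrapped. Below is a sketch of that wiring, assuming a small VDABufferContext struct holding the CVPixelBufferRef (implied by the callback above); av_buffer_create() is the real FFmpeg API, but the helper function itself is illustrative:

/* Sketch: registering release_buffer() via av_buffer_create(). */
#include <CoreVideo/CoreVideo.h>
#include <libavutil/buffer.h>
#include <libavutil/mem.h>

typedef struct VDABufferContext {
    CVPixelBufferRef cv_buffer;
} VDABufferContext;

static AVBufferRef *wrap_cv_buffer(CVPixelBufferRef cv_buffer)
{
    VDABufferContext *context = av_mallocz(sizeof(*context));
    if (!context)
        return NULL;

    /* Take our own reference and lock for CPU access; release_buffer()
     * undoes both once the last AVBufferRef reference is dropped. */
    context->cv_buffer = CVPixelBufferRetain(cv_buffer);
    CVPixelBufferLockBaseAddress(cv_buffer, 0);

    AVBufferRef *buf = av_buffer_create(NULL, 0, release_buffer, context, 0);
    if (!buf) {
        CVPixelBufferUnlockBaseAddress(cv_buffer, 0);
        CVPixelBufferRelease(cv_buffer);
        av_free(context);
    }
    return buf;
}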
Example #7
static void CVPX_I420(filter_t *p_filter, picture_t *sourcePicture, picture_t *destinationPicture)
{
    VLC_UNUSED(p_filter);
    picture_sys_t *picsys = sourcePicture->p_sys;

    if (picsys == NULL)
        return;

    if (picsys->pixelBuffer == nil)
        return;

    unsigned width = CVPixelBufferGetWidthOfPlane(picsys->pixelBuffer, 0);
    unsigned height = CVPixelBufferGetHeightOfPlane(picsys->pixelBuffer, 0);

    if (width == 0 || height == 0)
        return;

    uint8_t *pp_plane[2];
    size_t pi_pitch[2];

    CVPixelBufferLockBaseAddress(picsys->pixelBuffer, kCVPixelBufferLock_ReadOnly);

    for (int i = 0; i < 2; i++) {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane(picsys->pixelBuffer, i);
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(picsys->pixelBuffer, i);
    }

    CopyFromNv12ToI420(destinationPicture, pp_plane, pi_pitch, height);

    CVPixelBufferUnlockBaseAddress(picsys->pixelBuffer, kCVPixelBufferLock_ReadOnly);
}
Example #8
// creates raw YV12 from the source frame
static ComponentResult convertColorSpace(VP8EncoderGlobals glob, ICMCompressorSourceFrameRef sourceFrame)
{
  CVPixelBufferRef sourcePixelBuffer = NULL;
  sourcePixelBuffer = ICMCompressorSourceFrameGetPixelBuffer(sourceFrame);
  CVPixelBufferLockBaseAddress(sourcePixelBuffer, 0);
  //copy our frame to the raw image.  TODO: I'm not checking for any padding here.
  unsigned char *srcBytes = CVPixelBufferGetBaseAddress(sourcePixelBuffer);
  dbg_printf("[vp8e - %08lx] CVPixelBufferGetBaseAddress %x\n", (UInt32)glob, sourcePixelBuffer);
  dbg_printf("[vp8e - %08lx] CopyChunkyYUV422ToPlanarYV12 %dx%d, %x, %d, %x, %d, %x, %d, %x, %d \n", (UInt32)glob,
             glob->width, glob->height,
             CVPixelBufferGetBaseAddress(sourcePixelBuffer),
             CVPixelBufferGetBytesPerRow(sourcePixelBuffer),
             glob->raw->planes[PLANE_Y],
             glob->raw->stride[PLANE_Y],
             glob->raw->planes[PLANE_U],
             glob->raw->stride[PLANE_U],
             glob->raw->planes[PLANE_V],
             glob->raw->stride[PLANE_V]);
  ComponentResult err = CopyChunkyYUV422ToPlanarYV12(glob->width, glob->height,
                                                     CVPixelBufferGetBaseAddress(sourcePixelBuffer),
                                                     CVPixelBufferGetBytesPerRow(sourcePixelBuffer),
                                                     glob->raw->planes[PLANE_Y],
                                                     glob->raw->stride[PLANE_Y],
                                                     glob->raw->planes[PLANE_U],
                                                     glob->raw->stride[PLANE_U],
                                                     glob->raw->planes[PLANE_V],
                                                     glob->raw->stride[PLANE_V]);

  CVPixelBufferUnlockBaseAddress(sourcePixelBuffer, 0);
  dbg_printf("[vp8e - %08lx]  CVPixelBufferUnlockBaseAddress %x\n", sourcePixelBuffer);

  return err;
}
Example #9
	bool getImage(const VideoFrame &videoFrame, void *dest, ImgScaler *nv12ToRGB32) override
	{
		{
			QMutexLocker locker(&m_buffersMutex);
			if (m_buffers.indexOf(videoFrame.surfaceId) < 0)
				return false;
		}

		CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)videoFrame.surfaceId;
		if (CVPixelBufferGetPixelFormatType(pixelBuffer) == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
		{
			CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

			const quint8 *srcData[2] = {
				(const quint8 *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0),
				(const quint8 *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1)
			};
			const qint32 srcLinesize[2] = {
				(qint32)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0),
				(qint32)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)
			};

			nv12ToRGB32->scale((const void **)srcData, srcLinesize, dest);

			CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

			return true;
		}

		return false;
	}
Example #10
static void
gst_core_video_meta_free (GstCoreVideoMeta * meta, GstBuffer * buf)
{
  if (meta->pixbuf != NULL) {
    CVPixelBufferUnlockBaseAddress (meta->pixbuf, kCVPixelBufferLock_ReadOnly);
  }

  CVBufferRelease (meta->cvbuf);
}
Example #11
int qEncodeAPI(QEncoder* encoder, char* bytes, int byteSize)
{
    OSErr err;
    CVPixelBufferPoolRef pixelBufferPool;
    CVPixelBufferRef pixelBuffer;
    unsigned char* baseAddress;
    size_t bufferSize;

    // Grab a pixel buffer from the pool (ICMCompressionSessionEncodeFrame() needs the input
    // data to be passed in as a CVPixelBufferRef).
    pixelBufferPool = ICMCompressionSessionGetPixelBufferPool(encoder->session);
    err = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBuffer);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not obtain a pixel buffer from pool");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        return -5;
    }

    // Lock the pixel-buffer so that we can copy our data into it for encoding
    // XXXX: would be nice to avoid this copy.
    err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not lock the pixel buffer");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }
    baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
//	bufferSize = CVPixelBufferGetWidth(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer) * 4;
    bufferSize = CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer);
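    // Note: bytesPerRow can exceed width*4 because of row padding, so the
    // buffer size is derived from the actual stride rather than from the
    // tightly-packed estimate on the commented-out line above.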

    // XXXX: for now, just for debugging.  For production, we should notice if this happens and deal with it "appropriately".
    if (byteSize != bufferSize) {
        fprintf(QSTDERR, "\nqEncodeQT(): input data size (%d) does not match pixel-buffer data size (%d)", byteSize, bufferSize);
    }

    // Copy the data and unlock the buffer
    memcpy(baseAddress, bytes, bufferSize);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // Encode the frame (now in pixel-buffer form).
    err = ICMCompressionSessionEncodeFrame(	encoder->session,
                                            pixelBuffer,
                                            0, 0, 0, // we're not specifying a frame time
                                            NULL,
                                            NULL,
                                            NULL);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not encode the frame");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }

    CVPixelBufferRelease(pixelBuffer);
    return 0;
}
Example #12
static void
emit_frame(vda_decoder_t *vdad, vda_frame_t *vf, media_queue_t *mq)
{
  int i;
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

  for(i = 0; i < 3; i++ ) {
    fi.fi_data[i] = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
    fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
  }
  
  if(vdad->vdad_last_pts != PTS_UNSET && vf->vf_pts != PTS_UNSET) {
    int64_t d = vf->vf_pts - vdad->vdad_last_pts;

    if(d > 1000 && d < 1000000)
      vdad->vdad_estimated_duration = d;
  }


  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_type = 'YUVP';
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;

  fi.fi_duration = vf->vf_duration > 10000 ? vf->vf_duration : vdad->vdad_estimated_duration;

  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_pts;
  fi.fi_color_space = -1;
  fi.fi_epoch = vf->vf_epoch;
  fi.fi_drive_clock = 1;
  fi.fi_vshift = 1;
  fi.fi_hshift = 1;

  video_decoder_t *vd = vdad->vdad_vd;

  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  if(fi.fi_duration > 0)
    video_deliver_frame(vd, &fi);

  CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);
  vdad->vdad_last_pts = vf->vf_pts;

  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VDA) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);

}
Example #13
static void
gst_core_media_meta_free (GstCoreMediaMeta * meta, GstBuffer * buf)
{
  if (meta->image_buf != NULL) {
    CVPixelBufferUnlockBaseAddress (meta->image_buf, 0);
    CVBufferRelease (meta->image_buf);
  }
  if (meta->block_buf != NULL) {
    CFRelease (meta->block_buf);
  }

  CFRelease (meta->sample_buf);
}
Example #14
	virtual void releaseData()
	{
		if (m_ref)
		{
			if (m_locked)
			{
				CVPixelBufferUnlockBaseAddress(m_ref, 0);
				m_locked = false;
			}
			
			CVPixelBufferRelease(m_ref);
			m_ref = NULL;
		}
	}
Example #15
 void
 AspectTransform::pushBuffer(const uint8_t *const data,
                             size_t size,
                             videocore::IMetadata &metadata)
 {
     auto output = m_output.lock();
     
     if(output) {
         CVPixelBufferRef pb = (CVPixelBufferRef)data;
         CVPixelBufferLockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
         
         float width = CVPixelBufferGetWidth(pb);
         float height = CVPixelBufferGetHeight(pb);
         
         if(width != m_prevWidth || height != m_prevHeight) {
             setBoundingBoxDirty();
             m_prevHeight = height;
             m_prevWidth = width;
         }
         
         if(m_boundingBoxDirty) {
             // TODO: Replace CVPixelBufferRef with an internal format.
             
             float wfac = float(m_boundingWidth) / width;
             float hfac = float(m_boundingHeight) / height;
             
             const float mult = (m_aspectMode == kAspectFit ? (wfac < hfac) : (wfac > hfac)) ? wfac : hfac;
             
             wfac = width*mult / float(m_boundingWidth);
             hfac = height*mult / float(m_boundingHeight);
             
             m_scale = glm::vec3(wfac,hfac,1.f);
             
             m_boundingBoxDirty = false;
         }
         
         CVPixelBufferUnlockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
         
         videocore::VideoBufferMetadata& md = dynamic_cast<videocore::VideoBufferMetadata&>(metadata);
         glm::mat4 & mat = md.getData<videocore::kVideoMetadataMatrix>();
         
         mat = glm::scale(mat, m_scale);
         
         output->pushBuffer(data, size, metadata);
     }
 }
Example #16
 void
 PixelBufferSource::pushPixelBuffer(void *data, size_t size)
 {
     
     auto outp = m_output.lock();
     
     if(outp) {
         // Lock before asking for the base address, then copy and unlock.
         CVPixelBufferLockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         void* loc = CVPixelBufferGetBaseAddress((CVPixelBufferRef)m_pixelBuffer);
         memcpy(loc, data, size);
         CVPixelBufferUnlockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         
         VideoBufferMetadata md(0.);
         md.setData(kLayerGame, shared_from_this());
         
         outp->pushBuffer((const uint8_t*)m_pixelBuffer, sizeof(CVPixelBufferRef), md);
     }
 }
Example #17
/**
 * gst_apple_core_video_pixel_buffer_unlock:
 * @gpixbuf: the wrapped CVPixelBuffer
 *
 * Unlocks the pixel buffer from CPU memory. Should be called
 * for every gst_apple_core_video_pixel_buffer_lock() call.
 */
static gboolean
gst_apple_core_video_pixel_buffer_unlock (GstAppleCoreVideoPixelBuffer *
    gpixbuf)
{
  CVOptionFlags lockFlags;
  CVReturn cvret;

  if (gpixbuf->lock_state == GST_APPLE_CORE_VIDEO_MEMORY_UNLOCKED) {
    GST_ERROR ("%p: pixel buffer %p not locked", gpixbuf, gpixbuf->buf);
    return FALSE;
  }

  if (!g_atomic_int_dec_and_test (&gpixbuf->lock_count)) {
    return TRUE;                /* still locked, by current and/or other callers */
  }

  g_mutex_lock (&gpixbuf->mutex);

  lockFlags =
      (gpixbuf->lock_state ==
      GST_APPLE_CORE_VIDEO_MEMORY_LOCKED_READONLY) ? kCVPixelBufferLock_ReadOnly
      : 0;
  cvret = CVPixelBufferUnlockBaseAddress (gpixbuf->buf, lockFlags);
  if (cvret != kCVReturnSuccess) {
    g_mutex_unlock (&gpixbuf->mutex);
    g_atomic_int_inc (&gpixbuf->lock_count);
    /* TODO: Map kCVReturnError etc. into strings */
    GST_ERROR ("%p: unable to unlock base address for pixbuf %p: %d", gpixbuf,
        gpixbuf->buf, cvret);
    return FALSE;
  }

  gpixbuf->lock_state = GST_APPLE_CORE_VIDEO_MEMORY_UNLOCKED;

  g_mutex_unlock (&gpixbuf->mutex);

  GST_DEBUG ("%p: pixbuf %p, %s (%d locks remaining)",
      gpixbuf,
      gpixbuf->buf,
      _lock_state_names[gpixbuf->lock_state], gpixbuf->lock_count);

  return TRUE;
}
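
The lock_state bookkeeping above matters because the flags passed to CVPixelBufferUnlockBaseAddress() must match the ones given to the corresponding lock call, and a read-only lock lets Core Video skip write-back work on unlock. A reduced illustration (the function name is illustrative):

#include <CoreVideo/CoreVideo.h>

static void with_readonly_lock (CVPixelBufferRef pixbuf)
{
  /* Lock and unlock flags must match; kCVPixelBufferLock_ReadOnly tells
   * Core Video the CPU will not modify the pixels. */
  if (CVPixelBufferLockBaseAddress (pixbuf, kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess)
    return;
  /* ... read via CVPixelBufferGetBaseAddress (pixbuf) ... */
  CVPixelBufferUnlockBaseAddress (pixbuf, kCVPixelBufferLock_ReadOnly);
}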
Example #18
bool CDVDVideoCodecVDA::GetPicture(DVDVideoPicture* pDvdVideoPicture)
{
  CCocoaAutoPool pool;
  FourCharCode pixel_buffer_format;
  CVPixelBufferRef picture_buffer_ref;

  // clone the video picture buffer settings.
  *pDvdVideoPicture = m_videobuffer;

  // get the top yuv frame, we risk getting the wrong frame if the frame queue
  // depth is less than the number of encoded reference frames. If queue depth
  // is greater than the number of encoded reference frames, then the top frame
  // will never change and we can just grab a ref to the top frame. This way
  // we don't lockout the vdadecoder while doing color format convert.
  pthread_mutex_lock(&m_queue_mutex);
  picture_buffer_ref = m_display_queue->pixel_buffer_ref;
  pixel_buffer_format = m_display_queue->pixel_buffer_format;
  pDvdVideoPicture->dts = m_display_queue->dts;
  pDvdVideoPicture->pts = m_display_queue->pts;
  pthread_mutex_unlock(&m_queue_mutex);

  // lock the CVPixelBuffer down
  CVPixelBufferLockBaseAddress(picture_buffer_ref, 0);
  int row_stride = CVPixelBufferGetBytesPerRowOfPlane(picture_buffer_ref, 0);
  uint8_t *base_ptr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(picture_buffer_ref, 0);
  if (base_ptr)
  {
    if (pixel_buffer_format == kCVPixelFormatType_422YpCbCr8)
      UYVY422_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
    else if (pixel_buffer_format == kCVPixelFormatType_32BGRA)
      BGRA_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
  }
  // unlock the CVPixelBuffer
  CVPixelBufferUnlockBaseAddress(picture_buffer_ref, 0);

  // now we can pop the top frame.
  DisplayQueuePop();

  //CLog::Log(LOGNOTICE, "%s - VDADecoderDecode dts(%f), pts(%f)", __FUNCTION__,
  //  pDvdVideoPicture->dts, pDvdVideoPicture->pts);

  return VC_PICTURE | VC_BUFFER;
}
Example #19
File: vda.c Project: Geal/vlc
static void copy420YpCbCr8Planar(picture_t *p_pic,
                                 CVPixelBufferRef buffer,
                                 unsigned i_height)
{
    uint8_t *pp_plane[2];
    size_t pi_pitch[2];

    if (!buffer)
        return;

    CVPixelBufferLockBaseAddress(buffer, 0);

    for (int i = 0; i < 2; i++) {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane(buffer, i);
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(buffer, i);
    }

    CopyFromNv12ToI420(p_pic, pp_plane, pi_pitch, i_height);

    CVPixelBufferUnlockBaseAddress(buffer, 0);
}
Example #20
static void h264_dec_output_cb(VTH264DecCtx *ctx, void *sourceFrameRefCon,
							   OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef imageBuffer,
							   CMTime presentationTimeStamp, CMTime presentationDuration ) {

	CGSize vsize;
	MSPicture pixbuf_desc;
	mblk_t *pixbuf = NULL;
	uint8_t *src_planes[4] = { NULL };
	int src_strides[4] = { 0 };
	size_t i;

	if(status != noErr || imageBuffer == NULL) {
		ms_error("VideoToolboxDecoder: fail to decode one frame: error %d", status);
		ms_filter_notify_no_arg(ctx->f, MS_VIDEO_DECODER_DECODING_ERRORS);
		ms_filter_lock(ctx->f);
		if(ctx->enable_avpf) {
			ms_error("VideoToolboxDecoder: sending PLI");
			ms_filter_notify_no_arg(ctx->f, MS_VIDEO_DECODER_SEND_PLI);
		}
		ms_filter_unlock(ctx->f);
		return;
	}

	vsize = CVImageBufferGetEncodedSize(imageBuffer);
	ctx->vsize.width = (int)vsize.width;
	ctx->vsize.height = (int)vsize.height;
	pixbuf = ms_yuv_buf_allocator_get(ctx->pixbuf_allocator, &pixbuf_desc, (int)vsize.width, (int)vsize.height);

	CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
	for(i=0; i<3; i++) {
		src_planes[i] = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, i);
		src_strides[i] = (int)CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, i);
	}
	ms_yuv_buf_copy(src_planes, src_strides, pixbuf_desc.planes, pixbuf_desc.strides, ctx->vsize);
	CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);

	ms_mutex_lock(&ctx->mutex);
	ms_queue_put(&ctx->queue, pixbuf);
	ms_mutex_unlock(&ctx->mutex);
}
Example #21
void jit_gl_hap_draw_frame(void *jitob, CVImageBufferRef frame)
{
	t_jit_gl_hap * x = (t_jit_gl_hap*)jitob;
	CFTypeID imageType = CFGetTypeID(frame);
	OSType newPixelFormat;

	if(x->validframe)
		return;
		
	if (imageType == CVPixelBufferGetTypeID()) {
        
        // Update the texture
        CVBufferRetain(frame);
		
		if(x->buffer) {
			CVPixelBufferUnlockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);
			CVBufferRelease(x->buffer);
		}
		
		x->buffer = frame;
		CVPixelBufferLockBaseAddress(x->buffer, kCVPixelBufferLock_ReadOnly);
		
		x->dim[0] = CVPixelBufferGetWidth(x->buffer);
		x->dim[1] = CVPixelBufferGetHeight(x->buffer);

		newPixelFormat = CVPixelBufferGetPixelFormatType(x->buffer);

		if(x->buffer && x->hap_format==JIT_GL_HAP_PF_HAP) {
			size_t extraRight, extraBottom;
			unsigned int bitsPerPixel;
			size_t bytesPerRow;
			size_t actualBufferSize;

			CVPixelBufferGetExtendedPixels(x->buffer, NULL, &extraRight, NULL, &extraBottom);
			x->roundedWidth = x->dim[0] + extraRight;
			x->roundedHeight = x->dim[1] + extraBottom;
			if (x->roundedWidth % 4 != 0 || x->roundedHeight % 4 != 0) {
				x->validframe = 0;
				return;
			}			

			switch (newPixelFormat) {
				case kHapPixelFormatTypeRGB_DXT1:
					x->newInternalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
					bitsPerPixel = 4;
					break;
				case kHapPixelFormatTypeRGBA_DXT5:
				case kHapPixelFormatTypeYCoCg_DXT5:
					x->newInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
					bitsPerPixel = 8;
					break;
				default:
					// we don't support non-DXT pixel buffers
					x->validframe = 0;
					return;
					break;
			}
			x->useshader = (newPixelFormat == kHapPixelFormatTypeYCoCg_DXT5);
			
			bytesPerRow = (x->roundedWidth * bitsPerPixel) / 8;
			x->newDataLength = bytesPerRow * x->roundedHeight; // usually not the full length of the buffer
			actualBufferSize = CVPixelBufferGetDataSize(x->buffer);
			
			// Check the buffer is as large as we expect it to be
			if (x->newDataLength > actualBufferSize) {
				x->validframe = 0;
				return;
			}

			// If we got this far we're good to go
			x->validframe = 1;
			x->target = GL_TEXTURE_2D;
			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			//x->drawhap = 1;
		}
		else if(x->buffer) {// && x->hap_format==JIT_GL_HAP_PF_HAP) {
			if( newPixelFormat == k24RGBPixelFormat )
				x->newInternalFormat = GL_RGB8;
			else if( newPixelFormat == k32BGRAPixelFormat )
				x->newInternalFormat = GL_RGBA8;
			else {
				x->validframe = 0;
				return;
			}

			x->roundedWidth = x->dim[0];
			x->roundedHeight = x->dim[1];
			x->newDataLength = CVPixelBufferGetDataSize(x->buffer);
			x->rowLength = CVPixelBufferGetBytesPerRow( x->buffer ) / (x->hap_format==JIT_GL_HAP_PF_RGB ? 3 : 4);
			x->target = GL_TEXTURE_RECTANGLE_EXT;
			
			if(!x->flipped) {
				jit_attr_setlong(x->texoutput, gensym("flip"), 1);
				x->flipped = 1;
			}
			x->validframe = 1;
		}
    }
	else {
#ifdef MAC_VERSION
		CGSize imageSize = CVImageBufferGetEncodedSize(frame);
		bool flipped = CVOpenGLTextureIsFlipped(frame);
		x->texture = CVOpenGLTextureGetName(frame);
		x->useshader = 0;
		x->dim[0] = (t_atom_long)imageSize.width;
		x->dim[1] = (t_atom_long)imageSize.height;
		x->validframe = 1;
		x->target = GL_TEXTURE_RECTANGLE_ARB;
		if(x->flipped!=flipped) {
			jit_attr_setlong(x->texoutput, gensym("flip"), flipped);
			x->flipped = flipped;			
		}
#endif
	}
}
Example #22
int  PrivateDecoderVDA::GetFrame(AVStream *stream,
                                 AVFrame *picture,
                                 int *got_picture_ptr,
                                 AVPacket *pkt)
{
    CocoaAutoReleasePool pool;
    int result = -1;
    if (!m_lib || !stream)
        return result;

    AVCodecContext *avctx = stream->codec;
    if (!avctx)
        return result;

    if (pkt)
    {
        CFDataRef avc_demux;
        CFDictionaryRef params;
        if (m_annexb)
        {
            // convert demuxer packet from bytestream (AnnexB) to bitstream
            AVIOContext *pb;
            int demuxer_bytes;
            uint8_t *demuxer_content;

            if(avio_open_dyn_buf(&pb) < 0)
            {
                return result;
            }
            demuxer_bytes = avc_parse_nal_units(pb, pkt->data, pkt->size);
            demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content);
            avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes);
            av_free(demuxer_content);
        }
        else if (m_convert_3byteTo4byteNALSize)
        {
            // convert demuxer packet from 3 byte NAL sizes to 4 byte
            AVIOContext *pb;
            if (avio_open_dyn_buf(&pb) < 0)
            {
                return result;
            }

            uint32_t nal_size;
            uint8_t *end = pkt->data + pkt->size;
            uint8_t *nal_start = pkt->data;
            while (nal_start < end)
            {
                nal_size = VDA_RB24(nal_start);
                avio_wb32(pb, nal_size);
                nal_start += 3;
                avio_write(pb, nal_start, nal_size);
                nal_start += nal_size;
            }

            uint8_t *demuxer_content;
            int demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content);
            avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes);
            av_free(demuxer_content);
        }
        else
        {
            avc_demux = CFDataCreate(kCFAllocatorDefault, pkt->data, pkt->size);
        }

        CFStringRef keys[4] = { CFSTR("FRAME_PTS"),
                                CFSTR("FRAME_INTERLACED"), CFSTR("FRAME_TFF"),
                                CFSTR("FRAME_REPEAT") };
        CFNumberRef values[4];
        values[0] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type,
                                   &pkt->pts);
        values[1] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->interlaced_frame);
        values[2] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->top_field_first);
        values[3] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->repeat_pict);
        params = CFDictionaryCreate(kCFAllocatorDefault, (const void **)&keys,
                                    (const void **)&values, 4,
                                    &kCFTypeDictionaryKeyCallBacks,
                                    &kCFTypeDictionaryValueCallBacks);

        INIT_ST;
        vda_st = m_lib->decoderDecode((VDADecoder)m_decoder, 0, avc_demux, params);
        CHECK_ST;
        if (vda_st == kVDADecoderNoErr)
            result = pkt->size;
        CFRelease(avc_demux);
        CFRelease(params);
    }

    if (m_decoded_frames.size() < m_max_ref_frames)
        return result;

    *got_picture_ptr = 1;
    m_frame_lock.lock();
    VDAFrame vdaframe = m_decoded_frames.takeLast();
    m_frame_lock.unlock();

    if (avctx->get_buffer(avctx, picture) < 0)
        return -1;

    picture->reordered_opaque = vdaframe.pts;
    picture->interlaced_frame = vdaframe.interlaced_frame;
    picture->top_field_first  = vdaframe.top_field_first;
    picture->repeat_pict      = vdaframe.repeat_pict;
    VideoFrame *frame         = (VideoFrame*)picture->opaque;

    PixelFormat in_fmt  = PIX_FMT_NONE;
    PixelFormat out_fmt = PIX_FMT_NONE;
    if (vdaframe.format == 'BGRA')
        in_fmt = PIX_FMT_BGRA;
    else if (vdaframe.format == '2vuy')
        in_fmt = PIX_FMT_UYVY422;

    if (frame->codec == FMT_YV12)
        out_fmt = PIX_FMT_YUV420P;

    if (out_fmt != PIX_FMT_NONE && in_fmt != PIX_FMT_NONE && frame->buf)
    {
        CVPixelBufferLockBaseAddress(vdaframe.buffer, 0);
        uint8_t* base = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(vdaframe.buffer, 0);
        AVPicture img_in, img_out;
        avpicture_fill(&img_out, (uint8_t *)frame->buf, out_fmt,
                       frame->width, frame->height);
        avpicture_fill(&img_in, base, in_fmt,
                       frame->width, frame->height);
        myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt,
                       frame->width, frame->height);
        CVPixelBufferUnlockBaseAddress(vdaframe.buffer, 0);
    }
    else
    {
        LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to convert decoded frame.");
    }

    CVPixelBufferRelease(vdaframe.buffer);
    return result;
}
Example #23
void
MoonVDADecoder::Cleanup (MediaFrame *frame)
{
	CVPixelBufferUnlockBaseAddress ((CVPixelBufferRef) frame->decoder_specific_data, 0);
	CVPixelBufferRelease ((CVPixelBufferRef) frame->decoder_specific_data);
}
Example #24
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            AppleVTDecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (!aImage) {
    // Image was dropped by the decoder, or none has been returned yet.
    // We need more input to continue.
    mCallback->InputExhausted();
    return NS_OK;
  }

  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
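    // (NV12 interleaves Cb and Cr in plane 1, so the Cr plane below reuses
    // the same base address with mOffset = 1 and mSkip = 1.)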
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::CreateAndCopyData(info,
                                   mImageContainer,
                                   aFrameRef.byte_offset,
                                   aFrameRef.composition_timestamp.ToMicroseconds(),
                                   aFrameRef.duration.ToMicroseconds(),
                                   buffer,
                                   aFrameRef.is_sync_point,
                                   aFrameRef.decode_timestamp.ToMicroseconds(),
                                   visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  if (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mCallback->InputExhausted();
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example #25
static int quicktimedrv_record(screenshot_t *screenshot)
{
    if (!video_ready) {
        return 0;
    }

    OSErr theError;

    // lock buffer
    theError = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error locking pixel buffer!");
        return -1;
    }

    // fill frame
    unsigned char *buffer = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);

    unsigned int line_size = screenshot->draw_buffer_line_size;
    int h = screenshot->height;
    int w = screenshot->width;
    int xoff = screenshot->x_offset;
    int yoff = screenshot->y_offset;
    BYTE *srcBuffer = screenshot->draw_buffer;

    // move to the frame offset in the target buffer and to the first line in the source
    buffer += (video_yoff) * bytesPerRow + video_xoff * 3;
    srcBuffer += yoff * line_size + xoff;

    int x, y;
    for (y = 0; y < h; y++) {
        int pix = 0;
        for (x = 0; x < w; x++) {
            BYTE val = srcBuffer[x];
            buffer[pix++] = screenshot->palette->entries[val].red;
            buffer[pix++] = screenshot->palette->entries[val].green;
            buffer[pix++] = screenshot->palette->entries[val].blue;
        }
        buffer += bytesPerRow;
        srcBuffer += line_size;
    }

    // unlock buffer
    theError = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    if (theError) {
        log_debug("quicktime: error unlocking pixel buffer!");
        return -1;
    }

    TimeValue64 next = CVGetCurrentHostTime() / divider;
    TimeValue64 duration = next - timestamp;
    timestamp = next;

    // encode frame
    theError = ICMCompressionSessionEncodeFrame(videoCompressionSession,
                                                pixelBuffer,
                                                timestamp, duration,
                                                kICMValidTime_DisplayTimeStampIsValid |
                                                kICMValidTime_DisplayDurationIsValid,
                                                NULL, NULL, (void *)NULL);
    if (theError) {
        log_debug("quicktime: error encoding frame!");
        return -1;
    }

    return 0;
}
Example #26
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
Example #27
int CVideoEncodeVt::CopyFrameToPixelBuffer(const AVFrame* pFrame, CVPixelBufferRef aPixelBuffer,
                                           const int* apStrides, const int* apRows)
{
    if(NULL == aPixelBuffer)
    {
        return -1;
    }
    
    
    int iPlaneCnt = 0;

    int ret = CVPixelBufferLockBaseAddress(aPixelBuffer, 0);
    if(0 != ret)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CVPixelBufferLockBaseAddress failed!");
        return -1;
    }
    
    
    if(CVPixelBufferIsPlanar(aPixelBuffer))
    {
        iPlaneCnt = (int)CVPixelBufferGetPlaneCount(aPixelBuffer);
        
        for(int i = 0; pFrame->data[i]; i++)
        {
            if(i == iPlaneCnt)
            {
                CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
                
                return -1;
            }
            
            uint8_t* pSrc = pFrame->data[i];
            uint8_t* pDst = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(aPixelBuffer, i);
            int iSrcStride = apStrides[i];
            int iDstStride = (int)CVPixelBufferGetBytesPerRowOfPlane(aPixelBuffer, i);
            
            if(iSrcStride == iDstStride)
            {
                memcpy(pDst, pSrc, iSrcStride * apRows[i]);
            }
            else
            {
                int iCopyBytes = iDstStride < iSrcStride ? iDstStride : iSrcStride;
                for(int j = 0; j < apRows[i]; j++)
                {
                    memcpy(pDst + j * iDstStride, pSrc + j * iSrcStride, iCopyBytes);
                }
            }
        }
    }
    else
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "aPixelBuffer must be yuv420p!");
        
        CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
        return -1;
    }
    
    CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
    
    
    return 0;
}
Example #28
// Copy and return a decoded frame.
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             AppleVDADecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (mQueuedSamples > mMaxRefFrames) {
    // We had stopped requesting more input because we had received too much at
    // the time. We can ask for more once again.
    mCallback->InputExhausted();
  }
  MOZ_ASSERT(mQueuedSamples);
  mQueuedSamples--;

  if (!aImage) {
    // Image was dropped by decoder.
    return NS_OK;
  }

  // Where our resulting image will end up.
  nsRefPtr<VideoData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error();
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::Create(info,
                        mImageContainer,
                        nullptr,
                        aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds(),
                        buffer,
                        aFrameRef.is_sync_point,
                        aFrameRef.decode_timestamp.ToMicroseconds(),
                        visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    nsRefPtr<layers::Image> image =
      mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
    layers::MacIOSurfaceImage* videoImage =
      static_cast<layers::MacIOSurfaceImage*>(image.get());
    videoImage->SetSurface(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 mImageContainer,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  while (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example #29
bool CDVDVideoCodecVDA::GetPicture(DVDVideoPicture* pDvdVideoPicture)
{
  // get the top yuv frame, we risk getting the wrong frame if the frame queue
  // depth is less than the number of encoded reference frames. If queue depth
  // is greater than the number of encoded reference frames, then the top frame
  // will never change and we can just grab a ref to the top frame.
  if (m_use_cvBufferRef)
  {
    pthread_mutex_lock(&m_queue_mutex);
    pDvdVideoPicture->dts             = m_display_queue->dts;
    pDvdVideoPicture->pts             = m_display_queue->pts;
    pDvdVideoPicture->cvBufferRef     = m_display_queue->pixel_buffer_ref;
    m_display_queue->pixel_buffer_ref = NULL;
    pthread_mutex_unlock(&m_queue_mutex);

    pDvdVideoPicture->format          = RENDER_FMT_CVBREF;
    pDvdVideoPicture->iFlags          = DVP_FLAG_ALLOCATED;
    pDvdVideoPicture->color_range     = 0;
    pDvdVideoPicture->color_matrix    = 4;
    pDvdVideoPicture->iWidth          = CVPixelBufferGetWidth(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iHeight         = CVPixelBufferGetHeight(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iDisplayWidth   = pDvdVideoPicture->iWidth;
    pDvdVideoPicture->iDisplayHeight  = pDvdVideoPicture->iHeight;
  }
  else
  {
    FourCharCode pixel_buffer_format;
    CVPixelBufferRef picture_buffer_ref;

    // clone the video picture buffer settings.
    *pDvdVideoPicture = m_videobuffer;

    // get the top yuv frame, we risk getting the wrong frame if the frame queue
    // depth is less than the number of encoded reference frames. If queue depth
    // is greater than the number of encoded reference frames, then the top frame
    // will never change and we can just grab a ref to the top frame. This way
    // we don't lockout the vdadecoder while doing color format convert.
    pthread_mutex_lock(&m_queue_mutex);
    picture_buffer_ref = m_display_queue->pixel_buffer_ref;
    pixel_buffer_format = m_display_queue->pixel_buffer_format;
    pDvdVideoPicture->dts = m_display_queue->dts;
    pDvdVideoPicture->pts = m_display_queue->pts;
    pthread_mutex_unlock(&m_queue_mutex);

    // lock the CVPixelBuffer down
    CVPixelBufferLockBaseAddress(picture_buffer_ref, 0);
    int row_stride = CVPixelBufferGetBytesPerRowOfPlane(picture_buffer_ref, 0);
    uint8_t *base_ptr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(picture_buffer_ref, 0);
    if (base_ptr)
    {
      if (pixel_buffer_format == kCVPixelFormatType_422YpCbCr8)
        UYVY422_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
      else if (pixel_buffer_format == kCVPixelFormatType_32BGRA)
        BGRA_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
    }
    // unlock the CVPixelBuffer
    CVPixelBufferUnlockBaseAddress(picture_buffer_ref, 0);
  }

  // now we can pop the top frame.
  DisplayQueuePop();
  //CLog::Log(LOGNOTICE, "%s - VDADecoderDecode dts(%f), pts(%f)", __FUNCTION__,
  //  pDvdVideoPicture->dts, pDvdVideoPicture->pts);

  return true;
}
Example #30
static void
emit_frame(vtb_decoder_t *vtbd, vtb_frame_t *vf, media_queue_t *mq)
{
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  if(vtbd->vtbd_last_pts != PTS_UNSET && vf->vf_mbm.mbm_pts != PTS_UNSET) {
    int64_t d = vf->vf_mbm.mbm_pts - vtbd->vtbd_last_pts;

    if(d > 1000 && d < 1000000)
      vtbd->vtbd_estimated_duration = d;
  }

  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_mbm.mbm_pts;
  fi.fi_color_space = -1;
  fi.fi_epoch = vf->vf_mbm.mbm_epoch;
  fi.fi_drive_clock = vf->vf_mbm.mbm_drive_clock;
  fi.fi_user_time = vf->vf_mbm.mbm_user_time;
  fi.fi_vshift = 1;
  fi.fi_hshift = 1;
  fi.fi_duration = vf->vf_mbm.mbm_duration > 10000 ? vf->vf_mbm.mbm_duration : vtbd->vtbd_estimated_duration;

  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;


  video_decoder_t *vd = vtbd->vtbd_vd;
  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  switch(vtbd->vtbd_pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar:
      fi.fi_type = 'YUVP';

      CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

      for(int i = 0; i < 3; i++ ) {
        fi.fi_data[i]  = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
        fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
      }

      if(fi.fi_duration > 0)
        video_deliver_frame(vd, &fi);

      CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);
      break;

    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
    case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
      fi.fi_type = 'CVPB';
      fi.fi_data[0] = (void *)vf->vf_buf;
      if(fi.fi_duration > 0)
        video_deliver_frame(vd, &fi);
      break;
  }



  vtbd->vtbd_last_pts = vf->vf_mbm.mbm_pts;

  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VTB) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);
}