Example 1
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // We can map the cv buffer addresses to a video frame in SurfaceInteropCVBuffer (may need VideoSurfaceInterop::mapToTexture()).
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // getting the base address may trigger an internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
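
Most examples in this collection follow the same CoreVideo access pattern: lock the pixel buffer's base address, read each plane's base pointer and bytes-per-row, copy or wrap the pixels, then unlock. Below is a minimal sketch of that pattern, assuming only a valid CVPixelBufferRef and caller-allocated destination planes; the helper itself is hypothetical, not taken from any project quoted here.

#include <CoreVideo/CoreVideo.h>
#include <stdint.h>
#include <string.h>

/* Sketch: copy every plane of a CVPixelBuffer row by row into
 * caller-provided destinations, tolerating differing strides. */
static int copy_pixel_buffer_planes(CVPixelBufferRef buf,
                                    uint8_t *dst[4], const size_t dst_stride[4])
{
    if (CVPixelBufferLockBaseAddress(buf, kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess)
        return -1;

    Boolean planar = CVPixelBufferIsPlanar(buf);
    size_t planes = planar ? CVPixelBufferGetPlaneCount(buf) : 1;
    for (size_t i = 0; i < planes; i++) {
        const uint8_t *src = planar ? CVPixelBufferGetBaseAddressOfPlane(buf, i)
                                    : CVPixelBufferGetBaseAddress(buf);
        size_t src_stride = planar ? CVPixelBufferGetBytesPerRowOfPlane(buf, i)
                                   : CVPixelBufferGetBytesPerRow(buf);
        size_t rows = planar ? CVPixelBufferGetHeightOfPlane(buf, i)
                             : CVPixelBufferGetHeight(buf);
        size_t n = src_stride < dst_stride[i] ? src_stride : dst_stride[i];
        for (size_t r = 0; r < rows; r++)   /* strides may differ: copy row by row */
            memcpy(dst[i] + r * dst_stride[i], src + r * src_stride, n);
    }
    CVPixelBufferUnlockBaseAddress(buf, kCVPixelBufferLock_ReadOnly);
    return 0;
}
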
Example 2
bool CDVDVideoCodecVDA::GetPicture(DVDVideoPicture* pDvdVideoPicture)
{
  CCocoaAutoPool pool;
  FourCharCode pixel_buffer_format;
  CVPixelBufferRef picture_buffer_ref;

  // clone the video picture buffer settings.
  *pDvdVideoPicture = m_videobuffer;

  // get the top yuv frame, we risk getting the wrong frame if the frame queue
  // depth is less than the number of encoded reference frames. If queue depth
  // is greater than the number of encoded reference frames, then the top frame
  // will never change and we can just grab a ref to the top frame. This way
  // we don't lockout the vdadecoder while doing color format convert.
  pthread_mutex_lock(&m_queue_mutex);
  picture_buffer_ref = m_display_queue->pixel_buffer_ref;
  pixel_buffer_format = m_display_queue->pixel_buffer_format;
  pDvdVideoPicture->dts = m_display_queue->dts;
  pDvdVideoPicture->pts = m_display_queue->pts;
  pthread_mutex_unlock(&m_queue_mutex);

  // lock the CVPixelBuffer down
  CVPixelBufferLockBaseAddress(picture_buffer_ref, 0);
  int row_stride = CVPixelBufferGetBytesPerRowOfPlane(picture_buffer_ref, 0);
  uint8_t *base_ptr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(picture_buffer_ref, 0);
  if (base_ptr)
  {
    if (pixel_buffer_format == kCVPixelFormatType_422YpCbCr8)
      UYVY422_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
    else if (pixel_buffer_format == kCVPixelFormatType_32BGRA)
      BGRA_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
  }
  // unlock the CVPixelBuffer
  CVPixelBufferUnlockBaseAddress(picture_buffer_ref, 0);

  // now we can pop the top frame.
  DisplayQueuePop();

  //CLog::Log(LOGNOTICE, "%s - VDADecoderDecode dts(%f), pts(%f)", __FUNCTION__,
  //  pDvdVideoPicture->dts, pDvdVideoPicture->pts);

  return VC_PICTURE | VC_BUFFER;
}
Example 3
File: vda.c Project: Geal/vlc
static void copy420YpCbCr8Planar(picture_t *p_pic,
                                 CVPixelBufferRef buffer,
                                 unsigned i_height)
{
    uint8_t *pp_plane[2];
    size_t pi_pitch[2];

    if (!buffer)
        return;

    CVPixelBufferLockBaseAddress(buffer, 0);

    for (int i = 0; i < 2; i++) {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane(buffer, i);
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(buffer, i);
    }

    CopyFromNv12ToI420(p_pic, pp_plane, pi_pitch, i_height);

    CVPixelBufferUnlockBaseAddress(buffer, 0);
}
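
CopyFromNv12ToI420 is a VLC-internal helper; conceptually it splits the interleaved CbCr plane of an NV12 buffer into the separate Cb and Cr planes that I420 expects. A simplified sketch of that chroma deinterleave (hypothetical helper, without VLC's cache-optimized copy path):

#include <stddef.h>
#include <stdint.h>

/* Sketch: NV12 stores chroma as CbCrCbCr...; I420 wants two separate
 * half-resolution planes. Split one interleaved plane into two. */
static void nv12_chroma_to_i420(uint8_t *dst_cb, uint8_t *dst_cr, size_t dst_pitch,
                                const uint8_t *src_cbcr, size_t src_pitch,
                                unsigned chroma_width, unsigned chroma_height)
{
    for (unsigned y = 0; y < chroma_height; y++) {
        for (unsigned x = 0; x < chroma_width; x++) {
            dst_cb[x] = src_cbcr[2 * x];     /* even bytes: Cb */
            dst_cr[x] = src_cbcr[2 * x + 1]; /* odd bytes: Cr  */
        }
        dst_cb   += dst_pitch;
        dst_cr   += dst_pitch;
        src_cbcr += src_pitch;
    }
}
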
Example 4
static void h264_dec_output_cb(VTH264DecCtx *ctx, void *sourceFrameRefCon,
							   OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef imageBuffer,
							   CMTime presentationTimeStamp, CMTime presentationDuration ) {

	CGSize vsize;
	MSPicture pixbuf_desc;
	mblk_t *pixbuf = NULL;
	uint8_t *src_planes[4] = { NULL };
	int src_strides[4] = { 0 };
	size_t i;

	if(status != noErr || imageBuffer == NULL) {
		ms_error("VideoToolboxDecoder: fail to decode one frame: error %d", status);
		ms_filter_notify_no_arg(ctx->f, MS_VIDEO_DECODER_DECODING_ERRORS);
		ms_filter_lock(ctx->f);
		if(ctx->enable_avpf) {
			ms_error("VideoToolboxDecoder: sending PLI");
			ms_filter_notify_no_arg(ctx->f, MS_VIDEO_DECODER_SEND_PLI);
		}
		ms_filter_unlock(ctx->f);
		return;
	}

	vsize = CVImageBufferGetEncodedSize(imageBuffer);
	ctx->vsize.width = (int)vsize.width;
	ctx->vsize.height = (int)vsize.height;
	pixbuf = ms_yuv_buf_allocator_get(ctx->pixbuf_allocator, &pixbuf_desc, (int)vsize.width, (int)vsize.height);

	CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
	for(i=0; i<3; i++) {
		src_planes[i] = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, i);
		src_strides[i] = (int)CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, i);
	}
	ms_yuv_buf_copy(src_planes, src_strides, pixbuf_desc.planes, pixbuf_desc.strides, ctx->vsize);
	CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);

	ms_mutex_lock(&ctx->mutex);
	ms_queue_put(&ctx->queue, pixbuf);
	ms_mutex_unlock(&ctx->mutex);
}
Example 5
/*****************************************************************************
 * vda_Copy420YpCbCr8Planar: copy y420 CVPixelBuffer to picture_t
 *****************************************************************************/
static void vda_Copy420YpCbCr8Planar( picture_t *p_pic,
                                      CVPixelBufferRef buffer,
                                      unsigned i_width,
                                      unsigned i_height,
                                      copy_cache_t *cache )
{
    uint8_t *pp_plane[3];
    size_t  pi_pitch[3];

    CVPixelBufferLockBaseAddress( buffer, 0 );

    for( int i = 0; i < 3; i++ )
    {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane( buffer, i );
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane( buffer, i );
    }

    CopyFromYv12( p_pic, pp_plane, pi_pitch,
                  i_width, i_height, cache );

    CVPixelBufferUnlockBaseAddress( buffer, 0 );
}
Example 6
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
Example 7
// Copy and return a decoded frame.
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             AppleVDADecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (mQueuedSamples > mMaxRefFrames) {
    // We had stopped requesting more input because we had received too much at
    // the time. We can ask for more once again.
    mCallback->InputExhausted();
  }
  MOZ_ASSERT(mQueuedSamples);
  mQueuedSamples--;

  if (!aImage) {
    // Image was dropped by decoder.
    return NS_OK;
  }

  // Where our resulting image will end up.
  nsRefPtr<VideoData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error();
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::Create(info,
                        mImageContainer,
                        nullptr,
                        aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds(),
                        buffer,
                        aFrameRef.is_sync_point,
                        aFrameRef.decode_timestamp.ToMicroseconds(),
                        visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    nsRefPtr<layers::Image> image =
      mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
    layers::MacIOSurfaceImage* videoImage =
      static_cast<layers::MacIOSurfaceImage*>(image.get());
    videoImage->SetSurface(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 mImageContainer,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  while (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example 8
GstBuffer *
gst_core_video_buffer_new (CVBufferRef cvbuf, GstVideoInfo * vinfo)
{
  CVPixelBufferRef pixbuf = NULL;
  GstBuffer *buf;
  GstCoreVideoMeta *meta;
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES];
  gint stride[GST_VIDEO_MAX_PLANES];

  if (CFGetTypeID (cvbuf) != CVPixelBufferGetTypeID ())
    /* TODO: Do we need to handle other buffer types? */
    goto error;

  pixbuf = (CVPixelBufferRef) cvbuf;

  if (CVPixelBufferLockBaseAddress (pixbuf,
          kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
    goto error;
  }

  buf = gst_buffer_new ();

  /* add the corevideo meta to free the underlying corevideo buffer */
  meta = (GstCoreVideoMeta *) gst_buffer_add_meta (buf,
      gst_core_video_meta_get_info (), NULL);
  meta->cvbuf = CVBufferRetain (cvbuf);
  meta->pixbuf = pixbuf;

  /* set stride, offset and size */
  memset (&offset, 0, sizeof (offset));
  memset (&stride, 0, sizeof (stride));

  if (CVPixelBufferIsPlanar (pixbuf)) {
    int i, size, off;

    n_planes = CVPixelBufferGetPlaneCount (pixbuf);
    off = 0;
    for (i = 0; i < n_planes; ++i) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixbuf, i);
      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixbuf, i);
      offset[i] = off;
      off += size;

      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddressOfPlane (pixbuf, i), size, 0, size,
              NULL, NULL));
    }
  } else {
    int size;

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixbuf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixbuf);  /* vinfo may be NULL here */

    gst_buffer_append_memory (buf,
        gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
            CVPixelBufferGetBaseAddress (pixbuf), size, 0, size, NULL, NULL));
  }

  if (vinfo) {
    GstVideoMeta *video_meta;

    video_meta =
        gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
        vinfo->finfo->format, vinfo->width, vinfo->height,
        n_planes, offset, stride);
  }

  return buf;

error:
  return NULL;
}
Example 9
File: vtb.c Project: Ezio-PS/movian
static void
emit_frame(vtb_decoder_t *vtbd, vtb_frame_t *vf, media_queue_t *mq)
{
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  if(vtbd->vtbd_last_pts != PTS_UNSET && vf->vf_mbm.mbm_pts != PTS_UNSET) {
    int64_t d = vf->vf_mbm.mbm_pts - vtbd->vtbd_last_pts;

    if(d > 1000 && d < 1000000)
      vtbd->vtbd_estimated_duration = d;
  }

  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_mbm.mbm_pts;
  fi.fi_color_space = -1;
  fi.fi_epoch = vf->vf_mbm.mbm_epoch;
  fi.fi_drive_clock = vf->vf_mbm.mbm_drive_clock;
  fi.fi_user_time = vf->vf_mbm.mbm_user_time;
  fi.fi_vshift = 1;
  fi.fi_hshift = 1;
  fi.fi_duration = vf->vf_mbm.mbm_duration > 10000 ? vf->vf_mbm.mbm_duration : vtbd->vtbd_estimated_duration;

  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;


  video_decoder_t *vd = vtbd->vtbd_vd;
  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  switch(vtbd->vtbd_pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar:
      fi.fi_type = 'YUVP';

      CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

      for(int i = 0; i < 3; i++ ) {
        fi.fi_data[i]  = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
        fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
      }

      if(fi.fi_duration > 0)
        video_deliver_frame(vd, &fi);

      CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);
      break;

    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
    case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
      fi.fi_type = 'CVPB';
      fi.fi_data[0] = (void *)vf->vf_buf;
      if(fi.fi_duration > 0)
        video_deliver_frame(vd, &fi);
      break;
  }



  vtbd->vtbd_last_pts = vf->vf_mbm.mbm_pts;

  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VTB) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);
}
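
The switch above hands planar 4:2:0 frames to the copy path ('YUVP') and keeps bi-planar frames zero-copy ('CVPB'), passing the CVPixelBufferRef itself downstream. A small sketch of that dispatch test (hypothetical helper; the format constants are the same CoreVideo ones used above):

#include <CoreVideo/CVPixelBuffer.h>

/* Sketch: bi-planar NV12 buffers can be handed off zero-copy to a
 * renderer that textures from them; planar buffers get copied out. */
static int wants_zero_copy(CVPixelBufferRef buf)
{
    OSType fmt = CVPixelBufferGetPixelFormatType(buf);
    return fmt == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
        || fmt == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
}
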
Example 10
static GF_Err VTBDec_ProcessData(GF_MediaDecoder *ifcg,
                               char *inBuffer, u32 inBufferLength,
                               u16 ES_ID, u32 *CTS,
                               char *outBuffer, u32 *outBufferLength,
                               u8 PaddingBits, u32 mmlevel)
{
    OSStatus status;
    CMSampleBufferRef sample = NULL;
    CMBlockBufferRef block_buffer = NULL;
	OSType type;
	char *in_data;
	u32 in_data_size;
	
	GF_Err e;
	VTBDec *ctx = (VTBDec *)ifcg->privateStack;
	
	if (ctx->skip_mpeg4_vosh) {
		GF_M4VDecSpecInfo dsi;
		dsi.width = dsi.height = 0;
		e = gf_m4v_get_config(inBuffer, inBufferLength, &dsi);
		//found a vosh - remove it from payload, init decoder if needed
		if ((e==GF_OK) && dsi.width && dsi.height) {
			if (!ctx->vtb_session) {
				ctx->vosh = inBuffer;
				ctx->vosh_size = dsi.next_object_start;
				e = VTBDec_InitDecoder(ctx, GF_FALSE);
				if (e) return e;

				//enforce removal for all frames
				ctx->skip_mpeg4_vosh = GF_TRUE;
				
				if (ctx->out_size != *outBufferLength) {
					*outBufferLength = ctx->out_size;
					return GF_BUFFER_TOO_SMALL;
				}
			}
			ctx->vosh_size = dsi.next_object_start;
		} else if (!ctx->vtb_session) {
			*outBufferLength=0;
			return GF_OK;
		}
	}

	if (ctx->init_mpeg12) {
		GF_M4VDecSpecInfo dsi;
		dsi.width = dsi.height = 0;
		
		e = gf_mpegv12_get_config(inBuffer, inBufferLength, &dsi);
		if ((e==GF_OK) && dsi.width && dsi.height) {
			ctx->width = dsi.width;
			ctx->height = dsi.height;
			ctx->pixel_ar = dsi.par_num;
			ctx->pixel_ar <<= 16;
			ctx->pixel_ar |= dsi.par_den;
			
			e = VTBDec_InitDecoder(ctx, GF_FALSE);
			if (e) return e;

			if (ctx->out_size != *outBufferLength) {
				*outBufferLength = ctx->out_size;
				return GF_BUFFER_TOO_SMALL;
			}
		}

		if (!ctx->vtb_session) {
			*outBufferLength=0;
			return GF_OK;
		}
	}

	if (ctx->is_annex_b || (!ctx->vtb_session && ctx->nalu_size_length) ) {
		if (ctx->cached_annex_b) {
			in_data = ctx->cached_annex_b;
			in_data_size = ctx->cached_annex_b_size;
			ctx->cached_annex_b = NULL;
		} else {
			e = VTB_RewriteNALs(ctx, inBuffer, inBufferLength, &in_data, &in_data_size);
			if (e) return e;
		}
		
		if (ctx->out_size != *outBufferLength) {
			*outBufferLength = ctx->out_size;
			ctx->cached_annex_b = in_data;
			ctx->cached_annex_b_size = in_data_size;

			return GF_BUFFER_TOO_SMALL;
		}
	} else if (ctx->vosh_size) {
		in_data = inBuffer + ctx->vosh_size;
		in_data_size = inBufferLength - ctx->vosh_size;
		ctx->vosh_size = 0;
	} else {
		in_data = inBuffer;
		in_data_size = inBufferLength;
	}
	
	if (!ctx->vtb_session) {
		*outBufferLength=0;
		return GF_OK;
	}
	

	status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, in_data, in_data_size, kCFAllocatorNull, NULL, 0, in_data_size, 0, &block_buffer);

	if (status) {
		return GF_IO_ERR;
	}

	*outBufferLength=0;
	if (block_buffer == NULL)
		return GF_OK;
		
	
	status = CMSampleBufferCreate(kCFAllocatorDefault, block_buffer, TRUE, NULL, NULL, ctx->fmt_desc, 1, 0, NULL, 0, NULL, &sample);

    if (status || (sample==NULL)) {
		if (block_buffer)
			CFRelease(block_buffer);
		return GF_IO_ERR;
	}
	ctx->last_error = GF_OK;
    status = VTDecompressionSessionDecodeFrame(ctx->vtb_session, sample, 0, NULL, 0);
    if (!status)
		status = VTDecompressionSessionWaitForAsynchronousFrames(ctx->vtb_session);
	

	CFRelease(block_buffer);
	CFRelease(sample);
	if (ctx->cached_annex_b)
		gf_free(in_data);
	
	if (ctx->last_error) return ctx->last_error;
	if (status) return GF_NON_COMPLIANT_BITSTREAM;
	
	if (!ctx->frame) {
		*outBufferLength=0;
		return ctx->last_error;
	}
	
	*outBufferLength = ctx->out_size;
	
	status = CVPixelBufferLockBaseAddress(ctx->frame, kCVPixelBufferLock_ReadOnly);
    if (status != kCVReturnSuccess) {
        GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[VTB] Error locking frame data\n"));
        return GF_IO_ERR;
    }
	type = CVPixelBufferGetPixelFormatType(ctx->frame);
	
    if (CVPixelBufferIsPlanar(ctx->frame)) {
        u32 i, j, nb_planes = (u32) CVPixelBufferGetPlaneCount(ctx->frame);
		char *dst = outBuffer;
		Bool needs_stride=GF_FALSE;
		if ((type==kCVPixelFormatType_420YpCbCr8Planar)
			|| (type==kCVPixelFormatType_420YpCbCr8PlanarFullRange)
			|| (type==kCVPixelFormatType_422YpCbCr8_yuvs)
			|| (type==kCVPixelFormatType_444YpCbCr8)
			|| (type=='444v')
		) {
			u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 0);
			
			//TOCHECK - for now the 3 planes are consecutive in VideoToolbox
			if (stride==ctx->width) {
				char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 0);
				memcpy(dst, data, sizeof(char)*ctx->out_size);
			} else {
				for (i=0; i<nb_planes; i++) {
					char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, i);
					u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, i);
					u32 w, h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, i);
					w = ctx->width;
					if (i) {
						switch (ctx->pix_fmt) {
						case GF_PIXEL_YUV444:
							break;
						case GF_PIXEL_YUV422:
						case GF_PIXEL_YV12:
							w /= 2;
							break;
						}
					}
					if (stride != w) {
						needs_stride=GF_TRUE;
						for (j=0; j<h; j++) {
							memcpy(dst, data, sizeof(char)*w);
							dst += w;
							data += stride;
						}
					} else {
						memcpy(dst, data, sizeof(char)*h*stride);
						dst += sizeof(char)*h*stride;
					}
				}
			}
        } else if ((type==kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange) || (type==kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)) {
			char *dst_v;
			char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 0);
			u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 0);
			u32 i, h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, 0);

			if (stride==ctx->width) {
				memcpy(dst, data, sizeof(char)*h*stride);
				dst += sizeof(char)*h*stride;
			} else {
				for (i=0; i<h; i++) {
					memcpy(dst, data, sizeof(char)*ctx->width);
					dst += ctx->width;
					data += stride;
				}
				needs_stride=GF_TRUE;
			}
			
			data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 1);
			stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 1);
			h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, 1);
			dst_v = dst+sizeof(char) * h*stride/2;

			for (i=0; i<ctx->width * h / 2; i++) {
				*dst = data[0];
				*dst_v = data[1];
				data += 2;
				dst_v++;
				dst++;
				
				if (!(i%ctx->width)) data += (stride - ctx->width);

			}

		}
    }

    CVPixelBufferUnlockBaseAddress(ctx->frame, kCVPixelBufferLock_ReadOnly);

	return GF_OK;
}
Example 11
GstBuffer *
gst_core_media_buffer_new (CMSampleBufferRef sample_buf)
{
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  gchar *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL &&
      CFGetTypeID (image_buf) == CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    if (CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      data = CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      size = 0;
      plane_count = CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx) *
            CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = CVPixelBufferGetBaseAddress (pixel_buf);
      size = CVPixelBufferGetBytesPerRow (pixel_buf) *
          CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    status = CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  CVBufferRetain ((CVBufferRef)sample_buf);
  meta->sample_buf = sample_buf;
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}
Example 12
int CVideoEncodeVt::CopyFrameToPixelBuffer(const AVFrame* pFrame, CVPixelBufferRef aPixelBuffer,
                                           const int* apStrides, const int* apRows)
{
    if(NULL == aPixelBuffer)
    {
        return -1;
    }
    
    
    int iPlaneCnt = 0;

    int ret = CVPixelBufferLockBaseAddress(aPixelBuffer, 0);
    if(0 != ret)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CVPixelBufferLockBaseAddress failed!");
        return -1;
    }
    
    
    if(CVPixelBufferIsPlanar(aPixelBuffer))
    {
        iPlaneCnt = (int)CVPixelBufferGetPlaneCount(aPixelBuffer);
        
        for(int i = 0; pFrame->data[i]; i++)
        {
            if(i == iPlaneCnt)
            {
                CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
                
                return -1;
            }
            
            uint8_t* pSrc = pFrame->data[i];
            uint8_t* pDst = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(aPixelBuffer, i);
            int iSrcStride = apStrides[i];
            int iDstStride = (int)CVPixelBufferGetBytesPerRowOfPlane(aPixelBuffer, i);
            
            if(iSrcStride == iDstStride)
            {
                memcpy(pDst, pSrc, iSrcStride * apRows[i]);
            }
            else
            {
                int iCopyBytes = iDstStride < iSrcStride ? iDstStride : iSrcStride;
                for(int j = 0; j < apRows[i]; j++)
                {
                    memcpy(pDst + j * iDstStride, pSrc + j * iSrcStride, iCopyBytes);
                }
            }
        }
    }
    else
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "aPixelBuffer must be yuv420p!");
        
        CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
        return -1;
    }
    
    CVPixelBufferUnlockBaseAddress(aPixelBuffer, 0);
    
    
    return 0;
}
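
For YUV420P input the strides come straight from the AVFrame, and the row counts are the full luma height plus two half-height chroma planes. A hypothetical call-site sketch (names assumed, not from the original project):

#include <libavutil/frame.h>

/* Sketch: derive the per-plane stride/row arrays that a copy routine
 * like CopyFrameToPixelBuffer() expects for a YUV420P frame. */
static void yuv420p_copy_args(const AVFrame *frame, int strides[3], int rows[3])
{
    strides[0] = frame->linesize[0];
    strides[1] = frame->linesize[1];
    strides[2] = frame->linesize[2];
    rows[0] = frame->height;
    rows[1] = (frame->height + 1) / 2;  /* 4:2:0 chroma is half height */
    rows[2] = (frame->height + 1) / 2;
}
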
Example 13
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            nsAutoPtr<FrameRef> aFrameRef)
{
  size_t width = CVPixelBufferGetWidth(aImage);
  size_t height = CVPixelBufferGetHeight(aImage);
  LOG("  got decoded frame data... %ux%u %s", width, height,
      CVPixelBufferIsPlanar(aImage) ? "planar" : "chunked");
#ifdef DEBUG
  size_t planes = CVPixelBufferGetPlaneCount(aImage);
  for (size_t i = 0; i < planes; ++i) {
    size_t stride = CVPixelBufferGetBytesPerRowOfPlane(aImage, i);
    LOG("     plane %u %ux%u rowbytes %u",
        (unsigned)i,
        CVPixelBufferGetWidthOfPlane(aImage, i),
        CVPixelBufferGetHeightOfPlane(aImage, i),
        (unsigned)stride);
  }
  MOZ_ASSERT(planes == 2);
#endif // DEBUG

  VideoData::YCbCrBuffer buffer;

  // Lock the returned image data.
  CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  if (rv != kCVReturnSuccess) {
    NS_ERROR("error locking pixel data");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }
  // Y plane.
  buffer.mPlanes[0].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
  buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
  buffer.mPlanes[0].mWidth = width;
  buffer.mPlanes[0].mHeight = height;
  buffer.mPlanes[0].mOffset = 0;
  buffer.mPlanes[0].mSkip = 0;
  // Cb plane.
  buffer.mPlanes[1].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[1].mWidth = (width+1) / 2;
  buffer.mPlanes[1].mHeight = (height+1) / 2;
  buffer.mPlanes[1].mOffset = 0;
  buffer.mPlanes[1].mSkip = 1;
  // Cr plane.
  buffer.mPlanes[2].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[2].mWidth = (width+1) / 2;
  buffer.mPlanes[2].mHeight = (height+1) / 2;
  buffer.mPlanes[2].mOffset = 1;
  buffer.mPlanes[2].mSkip = 1;

  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(width, height);
  info.mHasVideo = true;
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mConfig.display_width,
                                      mConfig.display_height);

  // Copy the image data into our own format.
  nsAutoPtr<VideoData> data;
  data =
    VideoData::Create(info,
                      mImageContainer,
                      nullptr,
                      aFrameRef->byte_offset,
                      aFrameRef->composition_timestamp,
                      aFrameRef->duration,
                      buffer,
                      aFrameRef->is_sync_point,
                      aFrameRef->decode_timestamp,
                      visible);
  // Unlock the returned image data.
  CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  mReorderQueue.Push(data.forget());
  // Assume a frame with a PTS <= current DTS is ready.
  while (mReorderQueue.Length() > 0) {
    VideoData* readyData = mReorderQueue.Pop();
    if (readyData->mTime <= aFrameRef->decode_timestamp) {
      LOG("returning queued frame with pts %lld", readyData->mTime);
      mCallback->Output(readyData);
    } else {
      LOG("requeued frame with pts %lld > %lld",
          readyData->mTime, aFrameRef->decode_timestamp);
      mReorderQueue.Push(readyData);
      break;
    }
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example 14
size_t QTPixelBuffer::bytesPerRowOfPlane(size_t plane) const
{
    return CVPixelBufferGetBytesPerRowOfPlane(m_pixelBuffer, plane);
}
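
Combined with the plane height, this accessor also gives a plane's total byte size, the same stride-times-rows product the GStreamer examples use when wrapping plane memory. A one-function sketch:

#include <CoreVideo/CVPixelBuffer.h>

/* Sketch: bytes occupied by one plane = bytes-per-row * number of rows. */
static size_t plane_size_bytes(CVPixelBufferRef buf, size_t plane)
{
    return CVPixelBufferGetBytesPerRowOfPlane(buf, plane)
         * CVPixelBufferGetHeightOfPlane(buf, plane);
}
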
Example 15
static void h264_enc_process(MSFilter *f) {
	VTH264EncCtx *ctx = (VTH264EncCtx *)f->data;
	mblk_t *frame;
	OSStatus err;
	CMTime p_time = CMTimeMake(f->ticker->time, 1000);

	if(!ctx->is_configured) {
		ms_queue_flush(f->inputs[0]);
		return;
	}

#if 0 && TARGET_OS_IPHONE
	CVPixelBufferPoolRef pixbuf_pool = VTCompressionSessionGetPixelBufferPool(ctx->session);
	if(pixbuf_pool == NULL) {
		ms_error("VideoToolbox: fails to get the pixel buffer pool");
		return;
	}
#endif

	while((frame = ms_queue_get(f->inputs[0]))) {
		YuvBuf src_yuv_frame, dst_yuv_frame = {0};
		CVPixelBufferRef pixbuf;
		CFMutableDictionaryRef enc_param = NULL;
		int i, pixbuf_fmt = kCVPixelFormatType_420YpCbCr8Planar;
		CFNumberRef value;
		CFMutableDictionaryRef pixbuf_attr;

		ms_yuv_buf_init_from_mblk(&src_yuv_frame, frame);

#if 0 && TARGET_OS_IPHONE
		CVPixelBufferPoolCreatePixelBuffer(NULL, pixbuf_pool, &pixbuf);
#else
		pixbuf_attr = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
		value = CFNumberCreate(NULL, kCFNumberIntType, &pixbuf_fmt);
		CFDictionarySetValue(pixbuf_attr, kCVPixelBufferPixelFormatTypeKey, value);
		CVPixelBufferCreate(NULL, ctx->conf.vsize.width, ctx->conf.vsize.height, kCVPixelFormatType_420YpCbCr8Planar, pixbuf_attr,  &pixbuf);
		CFRelease(pixbuf_attr);
#endif

		CVPixelBufferLockBaseAddress(pixbuf, 0);
		dst_yuv_frame.w = (int)CVPixelBufferGetWidth(pixbuf);
		dst_yuv_frame.h = (int)CVPixelBufferGetHeight(pixbuf);
		for(i=0; i<3; i++) {
			dst_yuv_frame.planes[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
			dst_yuv_frame.strides[i] = (int)CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
		}
		ms_yuv_buf_copy(src_yuv_frame.planes, src_yuv_frame.strides, dst_yuv_frame.planes, dst_yuv_frame.strides, (MSVideoSize){dst_yuv_frame.w, dst_yuv_frame.h});
		CVPixelBufferUnlockBaseAddress(pixbuf, 0);
		freemsg(frame);

		ms_filter_lock(f);
		if(ctx->fps_changed || ctx->bitrate_changed || ctx->vfu_requested) {
			CFNumberRef value;
			enc_param = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
			if(ctx->fps_changed) {
				value = CFNumberCreate(NULL, kCFNumberFloatType, &ctx->conf.fps);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_ExpectedFrameRate, value);
				ctx->fps_changed = FALSE;
			}
			if(ctx->bitrate_changed) {
				value = CFNumberCreate(NULL, kCFNumberIntType, &ctx->conf.required_bitrate);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_AverageBitRate, value);
				ctx->bitrate_changed = FALSE;
			}
			if(ctx->vfu_requested) {
				int force_keyframe = 1;
				value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
				CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
				ctx->vfu_requested = FALSE;
			}
		}
		ms_filter_unlock(f);

		if(!ctx->enable_avpf) {
			if(ctx->first_frame) {
				ms_video_starter_first_frame(&ctx->starter, f->ticker->time);
			}
			if(ms_video_starter_need_i_frame(&ctx->starter, f->ticker->time)) {
				if(enc_param == NULL) enc_param = CFDictionaryCreateMutable(NULL, 0, NULL, NULL);
				if(CFDictionaryGetValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame) == NULL) {
					int force_keyframe = 1;
					CFNumberRef value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
					CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
				}
			}
		}

		if((err = VTCompressionSessionEncodeFrame(ctx->session, pixbuf, p_time, kCMTimeInvalid, enc_param, NULL, NULL)) != noErr) {
			ms_error("VideoToolbox: could not pass a pixbuf to the encoder: error code %d", err);
		}
		CFRelease(pixbuf);

		ctx->first_frame = FALSE;

		if(enc_param) CFRelease(enc_param);
	}

	ms_mutex_lock(&ctx->mutex);
	while ((frame = ms_queue_get(&ctx->queue))) {
		ms_mutex_unlock(&ctx->mutex);
		ms_queue_put(f->outputs[0], frame);
		ms_mutex_lock(&ctx->mutex);
	}
	ms_mutex_unlock(&ctx->mutex);
}
Example 16
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            AppleVTDecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (!aImage) {
    // Image was dropped by the decoder, or none has been returned yet.
    // We need more input to continue.
    mCallback->InputExhausted();
    return NS_OK;
  }

  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::CreateAndCopyData(info,
                                   mImageContainer,
                                   aFrameRef.byte_offset,
                                   aFrameRef.composition_timestamp.ToMicroseconds(),
                                   aFrameRef.duration.ToMicroseconds(),
                                   buffer,
                                   aFrameRef.is_sync_point,
                                   aFrameRef.decode_timestamp.ToMicroseconds(),
                                   visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  if (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mCallback->InputExhausted();
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example 17
static gboolean
gst_core_media_buffer_wrap_pixel_buffer (GstBuffer * buf, GstVideoInfo * info,
    CVPixelBufferRef pixel_buf, gboolean * has_padding, gboolean map)
{
  guint n_planes;
  gsize offset[GST_VIDEO_MAX_PLANES] = { 0 };
  gint stride[GST_VIDEO_MAX_PLANES] = { 0 };
  GstVideoMeta *video_meta;
  UInt32 size;

  if (map && CVPixelBufferLockBaseAddress (pixel_buf, 0) != kCVReturnSuccess) {
    GST_ERROR ("Could not lock pixel buffer base address");
    return FALSE;
  }

  *has_padding = FALSE;

  if (CVPixelBufferIsPlanar (pixel_buf)) {
    gint i, size = 0, plane_offset = 0;

    n_planes = CVPixelBufferGetPlaneCount (pixel_buf);
    for (i = 0; i < n_planes; i++) {
      stride[i] = CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, i);

      if (stride[i] != GST_VIDEO_INFO_PLANE_STRIDE (info, i)) {
        *has_padding = TRUE;
      }

      size = stride[i] * CVPixelBufferGetHeightOfPlane (pixel_buf, i);
      offset[i] = plane_offset;
      plane_offset += size;

      if (map) {
        gst_buffer_append_memory (buf,
            gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
                CVPixelBufferGetBaseAddressOfPlane (pixel_buf, i), size, 0,
                size, NULL, NULL));
      }
    }
  } else {

    n_planes = 1;
    stride[0] = CVPixelBufferGetBytesPerRow (pixel_buf);
    offset[0] = 0;
    size = stride[0] * CVPixelBufferGetHeight (pixel_buf);

    if (map) {
      gst_buffer_append_memory (buf,
          gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE,
              CVPixelBufferGetBaseAddress (pixel_buf), size, 0, size, NULL,
              NULL));
    }
  }

  video_meta =
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_INFO_FORMAT (info), info->width, info->height, n_planes, offset,
      stride);

  return TRUE;
}
Example 18
static void
emit_frame(vda_decoder_t *vdad, vda_frame_t *vf, media_queue_t *mq)
{
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  if(vdad->vdad_last_pts != PTS_UNSET && vf->vf_pts != PTS_UNSET) {
    int64_t d = vf->vf_pts - vdad->vdad_last_pts;

    if(d > 1000 && d < 1000000)
      vdad->vdad_estimated_duration = d;
  }

  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_pts;
  fi.fi_color_space = -1;
  fi.fi_epoch = vf->vf_epoch;
  fi.fi_drive_clock = 1;
  fi.fi_vshift = 1;
  fi.fi_hshift = 1;
  fi.fi_duration = vf->vf_duration > 10000 ? vf->vf_duration : vdad->vdad_estimated_duration;

  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;


  video_decoder_t *vd = vdad->vdad_vd;
  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  if(vdad->vdad_zero_copy) {

    fi.fi_type = 'VDA';
    fi.fi_data[0] = (void *)vf->vf_buf;
    if(fi.fi_duration > 0)
      video_deliver_frame(vd, &fi);

  } else {

    fi.fi_type = 'YUVP';

    CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

    for(int i = 0; i < 3; i++ ) {
      fi.fi_data[i] = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
      fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
    }


    if(fi.fi_duration > 0)
      video_deliver_frame(vd, &fi);

    CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);

  }

  vdad->vdad_last_pts = vf->vf_pts;

  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VDA) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);
}
Example 19
bool CDVDVideoCodecVDA::GetPicture(DVDVideoPicture* pDvdVideoPicture)
{
  // get the top yuv frame, we risk getting the wrong frame if the frame queue
  // depth is less than the number of encoded reference frames. If queue depth
  // is greater than the number of encoded reference frames, then the top frame
  // will never change and we can just grab a ref to the top frame.
  if (m_use_cvBufferRef)
  {
    pthread_mutex_lock(&m_queue_mutex);
    pDvdVideoPicture->dts             = m_display_queue->dts;
    pDvdVideoPicture->pts             = m_display_queue->pts;
    pDvdVideoPicture->cvBufferRef     = m_display_queue->pixel_buffer_ref;
    m_display_queue->pixel_buffer_ref = NULL;
    pthread_mutex_unlock(&m_queue_mutex);

    pDvdVideoPicture->format          = RENDER_FMT_CVBREF;
    pDvdVideoPicture->iFlags          = DVP_FLAG_ALLOCATED;
    pDvdVideoPicture->color_range     = 0;
    pDvdVideoPicture->color_matrix    = 4;
    pDvdVideoPicture->iWidth          = CVPixelBufferGetWidth(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iHeight         = CVPixelBufferGetHeight(pDvdVideoPicture->cvBufferRef);
    pDvdVideoPicture->iDisplayWidth   = pDvdVideoPicture->iWidth;
    pDvdVideoPicture->iDisplayHeight  = pDvdVideoPicture->iHeight;
  }
  else
  {
    FourCharCode pixel_buffer_format;
    CVPixelBufferRef picture_buffer_ref;

    // clone the video picture buffer settings.
    *pDvdVideoPicture = m_videobuffer;

    // get the top yuv frame, we risk getting the wrong frame if the frame queue
    // depth is less than the number of encoded reference frames. If queue depth
    // is greater than the number of encoded reference frames, then the top frame
    // will never change and we can just grab a ref to the top frame. This way
    // we don't lockout the vdadecoder while doing color format convert.
    pthread_mutex_lock(&m_queue_mutex);
    picture_buffer_ref = m_display_queue->pixel_buffer_ref;
    pixel_buffer_format = m_display_queue->pixel_buffer_format;
    pDvdVideoPicture->dts = m_display_queue->dts;
    pDvdVideoPicture->pts = m_display_queue->pts;
    pthread_mutex_unlock(&m_queue_mutex);

    // lock the CVPixelBuffer down
    CVPixelBufferLockBaseAddress(picture_buffer_ref, 0);
    int row_stride = CVPixelBufferGetBytesPerRowOfPlane(picture_buffer_ref, 0);
    uint8_t *base_ptr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(picture_buffer_ref, 0);
    if (base_ptr)
    {
      if (pixel_buffer_format == kCVPixelFormatType_422YpCbCr8)
        UYVY422_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
      else if (pixel_buffer_format == kCVPixelFormatType_32BGRA)
        BGRA_to_YUV420P(base_ptr, row_stride, pDvdVideoPicture);
    }
    // unlock the CVPixelBuffer
    CVPixelBufferUnlockBaseAddress(picture_buffer_ref, 0);
  }

  // now we can pop the top frame.
  DisplayQueuePop();
  //CLog::Log(LOGNOTICE, "%s - VDADecoderDecode dts(%f), pts(%f)", __FUNCTION__,
  //  pDvdVideoPicture->dts, pDvdVideoPicture->pts);

  return true;
}
Example 20
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}