Example #1
nsresult
AppleVTDecoder::WaitForAsynchronousFrames()
{
  OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
  if (rv != noErr) {
    LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
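All of these examples use VTDecompressionSessionWaitForAsynchronousFrames() as a synchronization point: it blocks until every frame submitted for asynchronous decompression has been emitted through the session's output callback. Below is a minimal sketch of the submit-then-wait pattern; session and sample are assumed to be created elsewhere, and this is an illustration rather than code from any of the projects shown here.

#include <VideoToolbox/VideoToolbox.h>

static OSStatus
decode_one_and_wait(VTDecompressionSessionRef session, CMSampleBufferRef sample)
{
  OSStatus err = VTDecompressionSessionDecodeFrame(
      session, sample,
      kVTDecodeFrame_EnableAsynchronousDecompression,
      NULL,   /* sourceFrameRefCon, passed through to the output callback */
      NULL);  /* infoFlagsOut */
  if (err != noErr)
    return err;

  /* Blocks until the output callback has run for every pending frame, so
   * decoded output can be inspected safely on this thread afterwards. */
  return VTDecompressionSessionWaitForAsynchronousFrames(session);
}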
Example #2
File: vtb.c Project: Ezio-PS/movian
static void
vtb_flush(struct media_codec *mc, struct video_decoder *vd)
{
  vtb_decoder_t *vtbd = mc->opaque;
  VTDecompressionSessionWaitForAsynchronousFrames(vtbd->vtbd_session);
  hts_mutex_lock(&vtbd->vtbd_mutex);
  destroy_frames(vtbd);
  vtbd->vtbd_max_ts   = PTS_UNSET;
  vtbd->vtbd_flush_to = PTS_UNSET;
  vtbd->vtbd_last_pts = PTS_UNSET;
  hts_mutex_unlock(&vtbd->vtbd_mutex);
}
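Note the ordering in this flush: the wait happens outside the mutex, and the queued frames are destroyed under it. Once the wait returns, and as long as no new frames are being submitted, the output callback will not fire again, so emptying the queue is race-free. A generic restatement of that pattern, using hypothetical names (decoder_state and drop_queued_frames are illustrations, not movian API):

#include <pthread.h>
#include <VideoToolbox/VideoToolbox.h>

struct decoder_state {                 /* hypothetical container */
  VTDecompressionSessionRef session;
  pthread_mutex_t lock;
  /* ... queued output frames ... */
};

void drop_queued_frames(struct decoder_state *d);  /* hypothetical */

static void
decoder_flush(struct decoder_state *d)
{
  /* Wait first: after this returns, the output callback has finished
   * delivering every in-flight frame. */
  VTDecompressionSessionWaitForAsynchronousFrames(d->session);

  pthread_mutex_lock(&d->lock);
  drop_queued_frames(d);   /* safe: the callback cannot append anymore */
  pthread_mutex_unlock(&d->lock);
}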
Example #3
File: vtb.c Project: Ezio-PS/movian
static void
vtb_close(struct media_codec *mc)
{
  vtb_decoder_t *vtbd = mc->opaque;
  VTDecompressionSessionWaitForAsynchronousFrames(vtbd->vtbd_session);
  destroy_frames(vtbd);

  VTDecompressionSessionInvalidate(vtbd->vtbd_session);
  CFRelease(vtbd->vtbd_session);

  CFRelease(vtbd->vtbd_fmt);
  free(vtbd);
}
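The teardown order here generalizes: drain in-flight frames, invalidate the session, then drop the last reference. A minimal sketch of just that sequence (all three calls are real VideoToolbox/CoreFoundation API; the wrapper function itself is an illustration):

#include <VideoToolbox/VideoToolbox.h>

static void
teardown_session(VTDecompressionSessionRef session)
{
  /* 1. Let every in-flight asynchronous frame reach the output callback. */
  VTDecompressionSessionWaitForAsynchronousFrames(session);

  /* 2. Invalidate before releasing, so the session stops calling back. */
  VTDecompressionSessionInvalidate(session);
  CFRelease(session);
}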
Example #4
static GstFlowReturn
gst_vtdec_push_frames_if_needed (GstVtdec * vtdec, gboolean drain,
    gboolean flush)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (vtdec);

  /* negotiate now so that we know whether we need to use the GL upload meta or
   * not */
  if (gst_pad_check_reconfigure (decoder->srcpad)) {
    if (!gst_video_decoder_negotiate (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      return ret;
    }
  }

  if (drain)
    VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);

  /* push a buffer if there are enough frames to guarantee that we push in PTS
   * order
   */
  while ((g_async_queue_length (vtdec->reorder_queue) >=
          vtdec->reorder_queue_length) || drain || flush) {
    frame = (GstVideoCodecFrame *) g_async_queue_try_pop (vtdec->reorder_queue);

    /* we need to check this in case reorder_queue_length=0 (jpeg for
     * example) or we're draining/flushing
     */
    if (frame) {
      if (flush || frame->flags & VTDEC_FRAME_FLAG_SKIP)
        gst_video_decoder_release_frame (decoder, frame);
      else if (frame->flags & VTDEC_FRAME_FLAG_DROP)
        gst_video_decoder_drop_frame (decoder, frame);
      else
        ret = gst_video_decoder_finish_frame (decoder, frame);
    }

    if (!frame || ret != GST_FLOW_OK)
      break;
  }

  return ret;
}
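Reduced to plain C, the drain path above does two things: force the decoder to deliver everything it still holds, then push the PTS-ordered queue downstream. The queue type and functions in this sketch are hypothetical stand-ins, not GStreamer API:

#include <VideoToolbox/VideoToolbox.h>

typedef struct frame_queue frame_queue;        /* hypothetical */
void *frame_queue_try_pop(frame_queue *q);     /* hypothetical */

static void
drain_reorder_queue(VTDecompressionSessionRef session, frame_queue *q,
    void (*emit)(void *frame))
{
  void *frame;

  /* 1. All asynchronously decoded frames land in the reorder queue. */
  VTDecompressionSessionWaitForAsynchronousFrames(session);

  /* 2. Empty the queue; it is already in presentation-timestamp order. */
  while ((frame = frame_queue_try_pop(q)) != NULL)
    emit(frame);
}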
Example #5
static GstFlowReturn
gst_vtdec_push_frames_if_needed (GstVtdec * vtdec, gboolean drain,
    gboolean flush)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (vtdec);

  /* FIXME: Instead of this, implement GstVideoDecoder::negotiate() and
   * just call gst_video_decoder_negotiate()
   */
  /* negotiate now so that we know whether we need to use the GL upload meta or
   * not */
  if (gst_pad_check_reconfigure (decoder->srcpad))
    gst_video_decoder_negotiate (decoder);

  if (drain)
    VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);

  /* push a buffer if there are enough frames to guarantee that we push in PTS
   * order
   */
  while ((g_async_queue_length (vtdec->reorder_queue) >=
          vtdec->reorder_queue_length) || drain || flush) {
    frame = (GstVideoCodecFrame *) g_async_queue_try_pop (vtdec->reorder_queue);
    if (frame && vtdec->texture_cache != NULL) {
      frame->output_buffer =
          gst_core_video_texture_cache_get_gl_buffer (vtdec->texture_cache,
          frame->output_buffer);
      if (!frame->output_buffer)
        GST_ERROR_OBJECT (vtdec, "couldn't get textures from buffer");
    }

    /* we need to check this in case reorder_queue_length=0 (jpeg for
     * example) or we're draining/flushing
     */
    if (frame) {
      if (flush)
        gst_video_decoder_drop_frame (decoder, frame);
      else
        ret = gst_video_decoder_finish_frame (decoder, frame);
    }

    if (!frame || ret != GST_FLOW_OK)
      break;
  }

  return ret;
}
Example #6
static GF_Err VTBDec_ProcessData(GF_MediaDecoder *ifcg,
                               char *inBuffer, u32 inBufferLength,
                               u16 ES_ID, u32 *CTS,
                               char *outBuffer, u32 *outBufferLength,
                               u8 PaddingBits, u32 mmlevel)
{
    OSStatus status;
    CMSampleBufferRef sample = NULL;
    CMBlockBufferRef block_buffer = NULL;
	OSType type;
	char *in_data;
	u32 in_data_size;
	
	GF_Err e;
	VTBDec *ctx = (VTBDec *)ifcg->privateStack;
	
	if (ctx->skip_mpeg4_vosh) {
		GF_M4VDecSpecInfo dsi;
		dsi.width = dsi.height = 0;
		e = gf_m4v_get_config(inBuffer, inBufferLength, &dsi);
		//found a vosh - remove it from payload, init decoder if needed
		if ((e==GF_OK) && dsi.width && dsi.height) {
			if (!ctx->vtb_session) {
				ctx->vosh = inBuffer;
				ctx->vosh_size = dsi.next_object_start;
				e = VTBDec_InitDecoder(ctx, GF_FALSE);
				if (e) return e;

				//enforce removal for all frames
				ctx->skip_mpeg4_vosh = GF_TRUE;
				
				if (ctx->out_size != *outBufferLength) {
					*outBufferLength = ctx->out_size;
					return GF_BUFFER_TOO_SMALL;
				}
			}
			ctx->vosh_size = dsi.next_object_start;
		} else if (!ctx->vtb_session) {
			*outBufferLength=0;
			return GF_OK;
		}
	}

	if (ctx->init_mpeg12) {
		GF_M4VDecSpecInfo dsi;
		dsi.width = dsi.height = 0;
		
		e = gf_mpegv12_get_config(inBuffer, inBufferLength, &dsi);
		if ((e==GF_OK) && dsi.width && dsi.height) {
			ctx->width = dsi.width;
			ctx->height = dsi.height;
			ctx->pixel_ar = dsi.par_num;
			ctx->pixel_ar <<= 16;
			ctx->pixel_ar |= dsi.par_den;
			
			e = VTBDec_InitDecoder(ctx, GF_FALSE);
			if (e) return e;

			if (ctx->out_size != *outBufferLength) {
				*outBufferLength = ctx->out_size;
				return GF_BUFFER_TOO_SMALL;
			}
		}

		if (!ctx->vtb_session) {
			*outBufferLength=0;
			return GF_OK;
		}
	}

	if (ctx->is_annex_b || (!ctx->vtb_session && ctx->nalu_size_length) ) {
		if (ctx->cached_annex_b) {
			in_data = ctx->cached_annex_b;
			in_data_size = ctx->cached_annex_b_size;
			ctx->cached_annex_b = NULL;
		} else {
			e = VTB_RewriteNALs(ctx, inBuffer, inBufferLength, &in_data, &in_data_size);
			if (e) return e;
		}
		
		if (ctx->out_size != *outBufferLength) {
			*outBufferLength = ctx->out_size;
			ctx->cached_annex_b = in_data;
			ctx->cached_annex_b_size = in_data_size;

			return GF_BUFFER_TOO_SMALL;
		}
	} else if (ctx->vosh_size) {
		in_data = inBuffer + ctx->vosh_size;
		in_data_size = inBufferLength - ctx->vosh_size;
		ctx->vosh_size = 0;
	} else {
		in_data = inBuffer;
		in_data_size = inBufferLength;
	}
	
	if (!ctx->vtb_session) {
		*outBufferLength=0;
		return GF_OK;
	}
	

	status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, in_data, in_data_size, kCFAllocatorNull, NULL, 0, in_data_size, 0, &block_buffer);

	if (status) {
		return GF_IO_ERR;
	}

	*outBufferLength=0;
	if (block_buffer == NULL)
		return GF_OK;
		
	
	status = CMSampleBufferCreate(kCFAllocatorDefault, block_buffer, TRUE, NULL, NULL, ctx->fmt_desc, 1, 0, NULL, 0, NULL, &sample);

    if (status || (sample==NULL)) {
		if (block_buffer)
			CFRelease(block_buffer);
		return GF_IO_ERR;
	}
	ctx->last_error = GF_OK;
    status = VTDecompressionSessionDecodeFrame(ctx->vtb_session, sample, 0, NULL, 0);
    if (!status)
		status = VTDecompressionSessionWaitForAsynchronousFrames(ctx->vtb_session);
	

	CFRelease(block_buffer);
	CFRelease(sample);
	if (ctx->cached_annex_b)
		gf_free(in_data);
	
	if (ctx->last_error) return ctx->last_error;
	if (status) return GF_NON_COMPLIANT_BITSTREAM;
	
	if (!ctx->frame) {
		*outBufferLength=0;
		return ctx->last_error;
	}
	
	*outBufferLength = ctx->out_size;
	
	status = CVPixelBufferLockBaseAddress(ctx->frame, kCVPixelBufferLock_ReadOnly);
    if (status != kCVReturnSuccess) {
        GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[VTB] Error locking frame data\n"));
        return GF_IO_ERR;
    }
	type = CVPixelBufferGetPixelFormatType(ctx->frame);
	
    if (CVPixelBufferIsPlanar(ctx->frame)) {
        u32 i, j, nb_planes = (u32) CVPixelBufferGetPlaneCount(ctx->frame);
		char *dst = outBuffer;
		Bool needs_stride=GF_FALSE;
		if ((type==kCVPixelFormatType_420YpCbCr8Planar)
			|| (type==kCVPixelFormatType_420YpCbCr8PlanarFullRange)
			|| (type==kCVPixelFormatType_422YpCbCr8_yuvs)
			|| (type==kCVPixelFormatType_444YpCbCr8)
			|| (type=='444v')
		) {
			u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 0);
			
			//TOCHECK - for now the 3 planes are consecutive in VideoToolbox
			if (stride==ctx->width) {
				char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 0);
				memcpy(dst, data, sizeof(char)*ctx->out_size);
			} else {
				for (i=0; i<nb_planes; i++) {
					char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, i);
					u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, i);
					u32 w, h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, i);
					w = ctx->width;
					if (i) {
						switch (ctx->pix_fmt) {
						case GF_PIXEL_YUV444:
							break;
						case GF_PIXEL_YUV422:
						case GF_PIXEL_YV12:
							w /= 2;
							break;
						}
					}
					if (stride != w) {
						needs_stride=GF_TRUE;
						for (j=0; j<h; j++) {
							memcpy(dst, data, sizeof(char)*w);
							dst += w;
							data += stride;
						}
					} else {
						memcpy(dst, data, sizeof(char)*h*stride);
						dst += sizeof(char)*h*stride;
					}
				}
			}
        } else if ((type==kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange) || (type==kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)) {
			char *dst_v;
			char *data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 0);
			u32 stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 0);
			u32 i, h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, 0);

			if (stride==ctx->width) {
				memcpy(dst, data, sizeof(char)*h*stride);
				dst += sizeof(char)*h*stride;
			} else {
				for (i=0; i<h; i++) {
					memcpy(dst, data, sizeof(char)*ctx->width);
					dst += ctx->width;
					data += stride;
				}
				needs_stride=GF_TRUE;
			}
			
			data = CVPixelBufferGetBaseAddressOfPlane(ctx->frame, 1);
			stride = (u32) CVPixelBufferGetBytesPerRowOfPlane(ctx->frame, 1);
			h = (u32) CVPixelBufferGetHeightOfPlane(ctx->frame, 1);
			//the packed destination puts the Cr plane width*h/2 bytes after the Cb plane
			dst_v = dst + ctx->width * h / 2;

			//de-interleave the CbCr plane into packed Cb and Cr planes, skipping
			//the row padding (stride - width bytes) at the end of each source row
			for (i=0; i<ctx->width * h / 2; i++) {
				*dst = data[0];
				*dst_v = data[1];
				data += 2;
				dst_v++;
				dst++;

				if (!((i+1) % (ctx->width/2))) data += (stride - ctx->width);
			}

		}
    }

    CVPixelBufferUnlockBaseAddress(ctx->frame, kCVPixelBufferLock_ReadOnly);

	return GF_OK;
}
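The plane copies in this function all follow one rule: the source CVPixelBuffer may pad each row out to its bytes-per-row, while the destination is tightly packed, so rows must be copied one at a time whenever stride != width. A compact sketch of that rule, assuming the caller already holds the CVPixelBufferLockBaseAddress lock, as the function above does (the helper itself is an illustration):

#include <CoreVideo/CoreVideo.h>
#include <string.h>

/* Copy one plane into a tightly packed buffer, honoring the source stride.
 * The pixel buffer must already be locked for reading by the caller. */
static void
copy_plane_packed(CVPixelBufferRef frame, size_t plane,
    unsigned char *dst, size_t row_bytes)
{
  unsigned char *src = CVPixelBufferGetBaseAddressOfPlane(frame, plane);
  size_t stride = CVPixelBufferGetBytesPerRowOfPlane(frame, plane);
  size_t h = CVPixelBufferGetHeightOfPlane(frame, plane);
  size_t row;

  for (row = 0; row < h; row++)
    memcpy(dst + row * row_bytes, src + row * stride, row_bytes);
}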