Example #1
static GstFlowReturn
gst_vtdec_finish (GstVideoDecoder * decoder)
{
  GstVtdec *vtdec = GST_VTDEC (decoder);

  GST_DEBUG_OBJECT (vtdec, "finish");

  return gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);
}
Example #2
static gboolean
gst_vtdec_start (GstVideoDecoder * decoder)
{
  GstVtdec *vtdec = GST_VTDEC (decoder);

  GST_DEBUG_OBJECT (vtdec, "start");

  return TRUE;
}
Example #3
static gboolean
gst_vtdec_flush (GstVideoDecoder * decoder)
{
  GstVtdec *vtdec = GST_VTDEC (decoder);

  GST_DEBUG_OBJECT (vtdec, "flush");

  gst_vtdec_push_frames_if_needed (vtdec, FALSE, TRUE);

  return TRUE;
}
Example #4
static void
gst_vtdec_set_context (GstElement * element, GstContext * context)
{
  GstVtdec *vtdec = GST_VTDEC (element);

  GST_INFO_OBJECT (element, "setting context %s",
      gst_context_get_context_type (context));
  gst_gl_handle_set_context (element, context,
      &vtdec->ctxh->display, &vtdec->ctxh->other_context);
  GST_ELEMENT_CLASS (gst_vtdec_parent_class)->set_context (element, context);
}
Example #5
void
gst_vtdec_finalize (GObject * object)
{
  GstVtdec *vtdec = GST_VTDEC (object);

  GST_DEBUG_OBJECT (vtdec, "finalize");

  g_async_queue_unref (vtdec->reorder_queue);
  gst_gl_context_helper_free (vtdec->ctxh);

  G_OBJECT_CLASS (gst_vtdec_parent_class)->finalize (object);
}
Example #6
static gboolean
gst_vtdec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstStructure *structure;
  CMVideoCodecType cm_format = 0;
  CMFormatDescriptionRef format_description = NULL;
  const char *caps_name;
  GstVtdec *vtdec = GST_VTDEC (decoder);

  GST_DEBUG_OBJECT (vtdec, "set_format");

  structure = gst_caps_get_structure (state->caps, 0);
  caps_name = gst_structure_get_name (structure);
  if (!strcmp (caps_name, "video/x-h264")) {
    cm_format = kCMVideoCodecType_H264;
  } else if (!strcmp (caps_name, "video/mpeg")) {
    cm_format = kCMVideoCodecType_MPEG2Video;
  } else if (!strcmp (caps_name, "image/jpeg")) {
    cm_format = kCMVideoCodecType_JPEG;
  }

  if (cm_format == kCMVideoCodecType_H264 && state->codec_data == NULL) {
    GST_INFO_OBJECT (vtdec, "no codec data, wait for one");
    return TRUE;
  }

  if (vtdec->session)
    gst_vtdec_invalidate_session (vtdec);

  gst_video_info_from_caps (&vtdec->video_info, state->caps);

  if (!gst_vtdec_compute_reorder_queue_length (vtdec, cm_format,
          state->codec_data))
    return FALSE;
  gst_vtdec_set_latency (vtdec);

  if (state->codec_data) {
    format_description = create_format_description_from_codec_data (vtdec,
        cm_format, state->codec_data);
  } else {
    format_description = create_format_description (vtdec, cm_format);
  }

  if (vtdec->format_description)
    CFRelease (vtdec->format_description);
  vtdec->format_description = format_description;

  if (!gst_vtdec_negotiate_output_format (vtdec, state))
    return FALSE;

  return TRUE;
}
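The helper that turns the upstream codec_data into a CMFormatDescriptionRef is not part of this excerpt. Below is a minimal sketch of what create_format_description_from_codec_data could look like for H.264, assuming codec_data carries an avcC atom; the "avcC" extension key layout and the sketch_ function name are illustrative assumptions based on the public CoreMedia API, not the original implementation.

/* Hypothetical sketch, assuming codec_data holds an ISO/IEC 14496-15 avcC
 * atom; only public CoreMedia calls are used. */
static CMFormatDescriptionRef
sketch_format_description_from_codec_data (GstVtdec * vtdec,
    CMVideoCodecType cm_format, GstBuffer * codec_data)
{
  CMFormatDescriptionRef fmt = NULL;
  CFMutableDictionaryRef extensions, atoms;
  CFDataRef data;
  GstMapInfo map;
  OSStatus status;

  gst_buffer_map (codec_data, &map, GST_MAP_READ);
  data = CFDataCreate (NULL, map.data, map.size);
  gst_buffer_unmap (codec_data, &map);

  /* wrap the raw codec_data as the "avcC" sample description extension atom */
  atoms = CFDictionaryCreateMutable (NULL, 0,
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
  CFDictionarySetValue (atoms, CFSTR ("avcC"), data);

  extensions = CFDictionaryCreateMutable (NULL, 0,
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
  CFDictionarySetValue (extensions,
      kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms, atoms);

  /* video_info was filled from the caps earlier in set_format() */
  status = CMVideoFormatDescriptionCreate (NULL, cm_format,
      vtdec->video_info.width, vtdec->video_info.height, extensions, &fmt);

  CFRelease (atoms);
  CFRelease (extensions);
  CFRelease (data);

  return status == noErr ? fmt : NULL;
}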
Example #7
static GstFlowReturn
gst_vtdec_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
{
  OSStatus status;
  CMSampleBufferRef cm_sample_buffer = NULL;
  VTDecodeFrameFlags input_flags, output_flags;
  GstVtdec *vtdec = GST_VTDEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  int decode_frame_number = frame->decode_frame_number;

  if (vtdec->format_description == NULL) {
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto out;
  }

  GST_LOG_OBJECT (vtdec, "got input frame %d", decode_frame_number);

  ret = gst_vtdec_push_frames_if_needed (vtdec, FALSE, FALSE);
  if (ret != GST_FLOW_OK)
    return ret;

  /* don't bother enabling kVTDecodeFrame_EnableTemporalProcessing at all since
   * it's not mandatory for the underlying VT codec to respect it. KISS and do
   * reordering ourselves.
   */
  input_flags = kVTDecodeFrame_EnableAsynchronousDecompression;
  output_flags = 0;

  cm_sample_buffer =
      cm_sample_buffer_from_gst_buffer (vtdec, frame->input_buffer);
  status =
      VTDecompressionSessionDecodeFrame (vtdec->session, cm_sample_buffer,
      input_flags, frame, NULL);
  /* note: the "&& FALSE" deliberately keeps this synchronous error path
   * disabled; per-frame decode status is also delivered to the asynchronous
   * decompression output callback */
  if (status != noErr && FALSE)
    goto error;

  GST_LOG_OBJECT (vtdec, "submitted input frame %d", decode_frame_number);

out:
  if (cm_sample_buffer)
    CFRelease (cm_sample_buffer);
  return ret;

error:
  GST_ELEMENT_ERROR (vtdec, STREAM, DECODE, (NULL),
      ("VTDecompressionSessionDecodeFrame returned %d", (int) status));
  ret = GST_FLOW_ERROR;
  goto out;
}
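gst_vtdec_push_frames_if_needed(), called from finish, flush and handle_frame above, is also outside the excerpt. The sketch below shows one way such a drain helper could work against the reorder_queue seen in gst_vtdec_finalize(); the reorder_queue_length field, the (drain, flush) argument meaning and the assumption that the queue holds GstVideoCodecFrame pointers are all illustrative guesses, not the original code.

/* Hypothetical sketch of a reorder-queue drain helper, assuming GstVtdec
 * stores decoded GstVideoCodecFrame pointers in reorder_queue and keeps a
 * reorder_queue_length threshold (both assumed field names). */
static GstFlowReturn
sketch_push_frames_if_needed (GstVtdec * vtdec, gboolean drain,
    gboolean flush)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (vtdec);
  GstVideoCodecFrame *frame;
  GstFlowReturn ret = GST_FLOW_OK;
  gint limit = (drain || flush) ? 0 : vtdec->reorder_queue_length;

  /* when draining, wait for outstanding asynchronous decodes first */
  if (drain && vtdec->session)
    VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);

  while (g_async_queue_length (vtdec->reorder_queue) > limit) {
    frame = (GstVideoCodecFrame *) g_async_queue_pop (vtdec->reorder_queue);
    if (flush)
      ret = gst_video_decoder_drop_frame (decoder, frame);
    else
      ret = gst_video_decoder_finish_frame (decoder, frame);
    if (ret != GST_FLOW_OK)
      break;
  }

  return ret;
}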
Example #8
static gboolean
gst_vtdec_stop (GstVideoDecoder * decoder)
{
  GstVtdec *vtdec = GST_VTDEC (decoder);

  if (vtdec->session)
    gst_vtdec_invalidate_session (vtdec);

  if (vtdec->texture_cache)
    gst_core_video_texture_cache_free (vtdec->texture_cache);
  vtdec->texture_cache = NULL;

  GST_DEBUG_OBJECT (vtdec, "stop");

  return TRUE;
}
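gst_vtdec_invalidate_session(), used in stop above as well as in set_format and negotiate, is not shown either. A minimal sketch using only public VideoToolbox calls could look like the following; the assumption is that it simply tears down vtdec->session.

/* Hypothetical sketch of gst_vtdec_invalidate_session(); assumes the
 * session field is the VTDecompressionSessionRef created at negotiation. */
static void
sketch_invalidate_session (GstVtdec * vtdec)
{
  g_return_if_fail (vtdec->session != NULL);

  /* let pending asynchronous frames complete before tearing down */
  VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);
  VTDecompressionSessionInvalidate (vtdec->session);
  CFRelease (vtdec->session);
  vtdec->session = NULL;
}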
Example #9
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures = FALSE;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
Example #10
static void
gst_vtdec_hw_init (GstVtdecHw * vtdec)
{
  GST_VTDEC (vtdec)->require_hardware = TRUE;
}
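gst_vtdec_create_session(), called from gst_vtdec_negotiate() above with an enable_hardware flag, is not included in the excerpt. The sketch below only illustrates how the require_hardware flag set here could be mapped onto the VideoToolbox decoder specification; the callback setup, output buffer attributes and error handling of the real helper are omitted, and the helper name and exact structure are assumptions.

/* Hypothetical sketch of building the decoder specification passed to
 * VTDecompressionSessionCreate(); 'enable_hardware' mirrors the third
 * argument used in gst_vtdec_negotiate() above (assumed meaning).
 * The two kVTVideoDecoderSpecification_* keys are macOS-only. */
static CFDictionaryRef
sketch_decoder_specification (GstVtdec * vtdec, gboolean enable_hardware)
{
  CFMutableDictionaryRef spec;

  spec = CFDictionaryCreateMutable (NULL, 0,
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

  /* ask VideoToolbox for a hardware decoder, and make it mandatory when
   * the element is the _hw variant (require_hardware set in init above) */
  CFDictionarySetValue (spec,
      kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder,
      enable_hardware ? kCFBooleanTrue : kCFBooleanFalse);
  if (enable_hardware && vtdec->require_hardware)
    CFDictionarySetValue (spec,
        kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
        kCFBooleanTrue);

  return spec;
}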
Example #11
static gboolean
gst_vtdec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  gboolean ret;
  GstCaps *caps;
  GstCapsFeatures *features;
  GstVtdec *vtdec = GST_VTDEC (decoder);

  ret =
      GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->decide_allocation
      (decoder, query);
  if (!ret)
    goto out;

  gst_query_parse_allocation (query, &caps, NULL);
  if (caps) {
    GstGLContext *gl_context = NULL;
    features = gst_caps_get_features (caps, 0);

    if (gst_caps_features_contains (features,
            GST_CAPS_FEATURE_MEMORY_GL_MEMORY)) {
      GstContext *context = NULL;
      GstQuery *query = gst_query_new_context ("gst.gl.local_context");
      if (gst_pad_peer_query (GST_VIDEO_DECODER_SRC_PAD (decoder), query)) {

        gst_query_parse_context (query, &context);
        if (context) {
          const GstStructure *s = gst_context_get_structure (context);
          gst_structure_get (s, "context", GST_GL_TYPE_CONTEXT, &gl_context,
              NULL);
        }
      }
      gst_query_unref (query);

      if (context) {
        GstVideoFormat internal_format;
        GstVideoCodecState *output_state =
            gst_video_decoder_get_output_state (decoder);

        GST_INFO_OBJECT (decoder, "pushing textures. GL context %p", context);
        if (vtdec->texture_cache)
          gst_core_video_texture_cache_free (vtdec->texture_cache);

#ifdef HAVE_IOS
        internal_format = GST_VIDEO_FORMAT_NV12;
#else
        internal_format = GST_VIDEO_FORMAT_UYVY;
#endif
        vtdec->texture_cache = gst_core_video_texture_cache_new (gl_context);
        gst_core_video_texture_cache_set_format (vtdec->texture_cache,
            internal_format, output_state->caps);
        gst_video_codec_state_unref (output_state);
        gst_object_unref (gl_context);
      } else {
        GST_WARNING_OBJECT (decoder,
            "got memory:GLMemory caps but not GL context from downstream element");
      }
    }
  }

out:
  return ret;
}
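The allocation path above retrieves the downstream GL context through a custom "gst.gl.local_context" context query. For reference, a downstream GL element could answer that query roughly as follows; the handler name and the my_gl_context parameter are hypothetical, only the query and context calls are standard GStreamer API.

/* Hypothetical downstream query handler answering the
 * "gst.gl.local_context" query issued in decide_allocation above.
 * 'my_gl_context' stands for the element's own GstGLContext. */
static gboolean
sketch_answer_local_context_query (GstPad * pad, GstQuery * query,
    GstGLContext * my_gl_context)
{
  GstContext *context;
  GstStructure *s;
  const gchar *type;

  if (!gst_query_parse_context_type (query, &type)
      || g_strcmp0 (type, "gst.gl.local_context") != 0)
    return FALSE;

  /* wrap the local GL context in a GstContext and attach it to the query */
  context = gst_context_new ("gst.gl.local_context", TRUE);
  s = gst_context_writable_structure (context);
  gst_structure_set (s, "context", GST_GL_TYPE_CONTEXT, my_gl_context, NULL);
  gst_query_set_context (query, context);
  gst_context_unref (context);

  return TRUE;
}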