Example #1
static gboolean
gst_amc_video_enc_set_src_caps (GstAmcVideoEnc * self, GstAmcFormat * format)
{
  GstCaps *caps;
  GstVideoCodecState *output_state;

  caps = caps_from_amc_format (format);
  if (!caps) {
    GST_ERROR_OBJECT (self, "Failed to create output caps");
    return FALSE;
  }

  /* It may not be correct to reference self->input_state here: MediaCodec is
   * an asynchronous model, so input_state may change multiple times and the
   * MediaFormat passed in may not be the one that matches the current
   * input_state.
   *
   * However, the final src caps currently only derive
   * width/height/pixel-aspect-ratio/framerate/codec_data from self->input_state.
   *
   * If the input width/height/codec_data change (is_format_change), MediaCodec
   * is restarted, which means that in those cases self->input_state does match.
   */
  output_state = gst_video_encoder_set_output_state (GST_VIDEO_ENCODER (self),
      caps, self->input_state);
  gst_video_codec_state_unref (output_state);

  if (!gst_video_encoder_negotiate (GST_VIDEO_ENCODER (self)))
    return FALSE;

  return TRUE;
}
Example #2
static gboolean
ensure_output_state (GstVaapiEncode * encode)
{
  GstVideoEncoder *const venc = GST_VIDEO_ENCODER_CAST (encode);
  GstVaapiEncodeClass *const klass = GST_VAAPIENCODE_GET_CLASS (encode);
  GstVaapiEncoderStatus status;
  GstCaps *out_caps;

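  /* Nothing to do unless the input caps changed since the last negotiation */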
  if (!encode->input_state_changed)
    return TRUE;

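  /* Ask the subclass for the caps describing the encoded output */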
  out_caps = klass->get_caps (encode);
  if (!out_caps)
    return FALSE;

  if (encode->output_state)
    gst_video_codec_state_unref (encode->output_state);
  encode->output_state = gst_video_encoder_set_output_state (venc, out_caps,
      encode->input_state);

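  /* Attach codec_data from the encoder when the negotiated format requires it */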
  if (encode->need_codec_data) {
    status = gst_vaapi_encoder_get_codec_data (encode->encoder,
        &encode->output_state->codec_data);
    if (status != GST_VAAPI_ENCODER_STATUS_SUCCESS)
      return FALSE;
  }

  if (!gst_video_encoder_negotiate (venc))
    return FALSE;

  encode->input_state_changed = FALSE;
  return TRUE;
}
Example #3
static gboolean
gst_openjpeg_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenJPEGEnc *self = GST_OPENJPEG_ENC (encoder);
  GstCaps *allowed_caps, *caps;
  GstStructure *s;
  const gchar *colorspace;
  gint ncomps;

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = gst_video_codec_state_ref (state);

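  /* Choose the JPEG 2000 variant (JP2, J2C or JPC) from what downstream allows */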
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  allowed_caps = gst_caps_truncate (allowed_caps);
  s = gst_caps_get_structure (allowed_caps, 0);
  if (gst_structure_has_name (s, "image/jp2")) {
    self->codec_format = OPJ_CODEC_JP2;
    self->is_jp2c = FALSE;
  } else if (gst_structure_has_name (s, "image/x-j2c")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = TRUE;
  } else if (gst_structure_has_name (s, "image/x-jpc")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = FALSE;
  } else {
    g_return_val_if_reached (FALSE);
  }

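  /* Select the image fill function and component count for the input format */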
  switch (state->info.finfo->format) {
    case GST_VIDEO_FORMAT_ARGB64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_ARGB:
      self->fill_image = fill_image_packed8_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_xRGB:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_Y444_10LE:
    case GST_VIDEO_FORMAT_Y444_10BE:
    case GST_VIDEO_FORMAT_I422_10LE:
    case GST_VIDEO_FORMAT_I422_10BE:
    case GST_VIDEO_FORMAT_I420_10LE:
    case GST_VIDEO_FORMAT_I420_10BE:
      self->fill_image = fill_image_planar16_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_YUV9:
      self->fill_image = fill_image_planar8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_GRAY8:
      self->fill_image = fill_image_planar8_1;
      ncomps = 1;
      break;
    case GST_VIDEO_FORMAT_GRAY16_LE:
    case GST_VIDEO_FORMAT_GRAY16_BE:
      self->fill_image = fill_image_planar16_1;
      ncomps = 1;
      break;
    default:
      g_assert_not_reached ();
  }

  if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV))
    colorspace = "sYUV";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB))
    colorspace = "sRGB";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_GRAY))
    colorspace = "GRAY";
  else
    g_return_val_if_reached (FALSE);

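  /* Build the output caps and negotiate them with downstream */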
  caps = gst_caps_new_simple (gst_structure_get_name (s),
      "colorspace", G_TYPE_STRING, colorspace,
      "num-components", G_TYPE_INT, ncomps, NULL);
  gst_caps_unref (allowed_caps);

  if (self->output_state)
    gst_video_codec_state_unref (self->output_state);
  self->output_state =
      gst_video_encoder_set_output_state (encoder, caps, state);

  gst_video_encoder_negotiate (GST_VIDEO_ENCODER (encoder));

  return TRUE;
}
Example #4
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
Example #5
static GstFlowReturn
gst_amc_video_enc_handle_output_frame (GstAmcVideoEnc * self,
    GstAmcBuffer * buf, const GstAmcBufferInfo * buffer_info,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstVideoEncoder *encoder = GST_VIDEO_ENCODER_CAST (self);

  /* The BUFFER_FLAG_CODEC_CONFIG logic is borrowed from
   * gst-omx; see *_handle_output_frame in
   * gstomxvideoenc.c and gstomxh264enc.c */
  if ((buffer_info->flags & BUFFER_FLAG_CODEC_CONFIG)
      && buffer_info->size > 0) {
    GstStructure *s;
    GstVideoCodecState *state;

    state = gst_video_encoder_get_output_state (encoder);
    s = gst_caps_get_structure (state->caps, 0);
    if (!strcmp (gst_structure_get_name (s), "video/x-h264")) {
      gst_video_codec_state_unref (state);

      if (buffer_info->size > 4 &&
          GST_READ_UINT32_BE (buf->data + buffer_info->offset) == 0x00000001) {
        GList *l = NULL;
        GstBuffer *hdrs;

        GST_DEBUG_OBJECT (self, "got codecconfig in byte-stream format");

        hdrs = gst_buffer_new_and_alloc (buffer_info->size);
        gst_buffer_fill (hdrs, 0, buf->data + buffer_info->offset,
            buffer_info->size);

        l = g_list_append (l, hdrs);
        gst_video_encoder_set_headers (encoder, l);
      }
    } else {
      GstBuffer *codec_data;

      GST_DEBUG_OBJECT (self, "Handling codec data");

      codec_data = gst_buffer_new_and_alloc (buffer_info->size);
      gst_buffer_fill (codec_data, 0, buf->data + buffer_info->offset,
          buffer_info->size);
      state->codec_data = codec_data;
      gst_video_codec_state_unref (state);

      if (!gst_video_encoder_negotiate (encoder)) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      return GST_FLOW_OK;
    }
  }

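  /* Regular encoded output: copy the MediaCodec payload into a GStreamer buffer */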
  if (buffer_info->size > 0) {
    GstBuffer *out_buf;
    GstPad *srcpad;

    srcpad = GST_VIDEO_ENCODER_SRC_PAD (encoder);
    out_buf =
        gst_video_encoder_allocate_output_buffer (encoder, buffer_info->size);
    gst_buffer_fill (out_buf, 0, buf->data + buffer_info->offset,
        buffer_info->size);

    GST_BUFFER_PTS (out_buf) =
        gst_util_uint64_scale (buffer_info->presentation_time_us, GST_USECOND,
        1);

    if (frame) {
      frame->output_buffer = out_buf;
      flow_ret = gst_video_encoder_finish_frame (encoder, frame);
    } else {
      /* This sometimes happens at EOS or if the input is not properly framed;
       * let's handle it gracefully by allocating a new buffer for the current
       * caps and filling it
       */

      GST_ERROR_OBJECT (self, "No corresponding frame found");
      flow_ret = gst_pad_push (srcpad, out_buf);
    }
  } else if (frame) {
    flow_ret = gst_video_encoder_finish_frame (encoder, frame);
  }

  return flow_ret;
}
Example #6
static gboolean
gst_openh264enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenh264Enc *openh264enc = GST_OPENH264ENC (encoder);
  gchar *debug_caps;
  guint width, height, fps_n, fps_d;
  SEncParamExt enc_params;
  SliceModeEnum slice_mode = SM_SINGLE_SLICE;
  guint n_slices = 1;
  gint ret;
  GstCaps *outcaps;
  GstVideoCodecState *output_state;
  openh264enc->frame_count = 0;
  int video_format = videoFormatI420;

  debug_caps = gst_caps_to_string (state->caps);
  GST_DEBUG_OBJECT (openh264enc, "gst_openh264enc_set_format called, caps: %s",
      debug_caps);
  g_free (debug_caps);

  gst_openh264enc_stop (encoder);

  if (openh264enc->input_state) {
    gst_video_codec_state_unref (openh264enc->input_state);
  }
  openh264enc->input_state = gst_video_codec_state_ref (state);

  width = GST_VIDEO_INFO_WIDTH (&state->info);
  height = GST_VIDEO_INFO_HEIGHT (&state->info);
  fps_n = GST_VIDEO_INFO_FPS_N (&state->info);
  fps_d = GST_VIDEO_INFO_FPS_D (&state->info);

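  /* Destroy any previous encoder instance before creating and configuring a new one */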
  if (openh264enc->encoder != NULL) {
    openh264enc->encoder->Uninitialize ();
    WelsDestroySVCEncoder (openh264enc->encoder);
    openh264enc->encoder = NULL;
  }
  WelsCreateSVCEncoder (&openh264enc->encoder);
  unsigned int uiTraceLevel = WELS_LOG_ERROR;
  openh264enc->encoder->SetOption (ENCODER_OPTION_TRACE_LEVEL, &uiTraceLevel);

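  /* Start from the library defaults, then override them with the element properties */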
  openh264enc->encoder->GetDefaultParams (&enc_params);

  enc_params.iUsageType = openh264enc->usage_type;
  enc_params.iPicWidth = width;
  enc_params.iPicHeight = height;
  enc_params.iTargetBitrate = openh264enc->bitrate;
  enc_params.iMaxBitrate = openh264enc->max_bitrate;
  enc_params.iRCMode = openh264enc->rate_control;
  enc_params.iTemporalLayerNum = 1;
  enc_params.iSpatialLayerNum = 1;
  enc_params.iLtrMarkPeriod = 30;
  enc_params.iMultipleThreadIdc = openh264enc->multi_thread;
  enc_params.bEnableDenoise = openh264enc->enable_denoise;
  enc_params.iComplexityMode = openh264enc->complexity;
  enc_params.uiIntraPeriod = openh264enc->gop_size;
  enc_params.bEnableBackgroundDetection = openh264enc->background_detection;
  enc_params.bEnableAdaptiveQuant = openh264enc->adaptive_quantization;
  enc_params.bEnableSceneChangeDetect = openh264enc->scene_change_detection;
  enc_params.bEnableFrameSkip = openh264enc->enable_frame_skip;
  enc_params.bEnableLongTermReference = 0;
#if OPENH264_MINOR >= 4
  enc_params.eSpsPpsIdStrategy = CONSTANT_ID;
#else
  enc_params.bEnableSpsPpsIdAddition = 0;
#endif
  enc_params.bPrefixNalAddingCtrl = 0;
  enc_params.fMaxFrameRate = fps_n * 1.0 / fps_d;
  enc_params.iLoopFilterDisableIdc = openh264enc->deblocking_mode;
  enc_params.sSpatialLayers[0].uiProfileIdc = PRO_BASELINE;
  enc_params.sSpatialLayers[0].iVideoWidth = enc_params.iPicWidth;
  enc_params.sSpatialLayers[0].iVideoHeight = enc_params.iPicHeight;
  enc_params.sSpatialLayers[0].fFrameRate = fps_n * 1.0 / fps_d;
  enc_params.sSpatialLayers[0].iSpatialBitrate = enc_params.iTargetBitrate;
  enc_params.sSpatialLayers[0].iMaxSpatialBitrate = enc_params.iMaxBitrate;

  if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_N_SLICES) {
    if (openh264enc->num_slices == 1)
      slice_mode = SM_SINGLE_SLICE;
    else
      slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = openh264enc->num_slices;
  } else if (openh264enc->slice_mode == GST_OPENH264_SLICE_MODE_AUTO) {
#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
    slice_mode = SM_AUTO_SLICE;
#else
    slice_mode = SM_FIXEDSLCNUM_SLICE;
    n_slices = 0;
#endif
  } else {
    GST_ERROR_OBJECT (openh264enc, "unexpected slice mode %d",
        openh264enc->slice_mode);
    slice_mode = SM_SINGLE_SLICE;
  }

#if OPENH264_MAJOR == 1 && OPENH264_MINOR < 6
  enc_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = n_slices;
#else
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceMode = slice_mode;
  enc_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = n_slices;
#endif

  openh264enc->framerate = (1 + fps_n / fps_d);

  ret = openh264enc->encoder->InitializeExt (&enc_params);

  if (ret != cmResultSuccess) {
    GST_ERROR_OBJECT (openh264enc, "failed to initialize encoder");
    return FALSE;
  }

  openh264enc->encoder->SetOption (ENCODER_OPTION_DATAFORMAT, &video_format);

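  /* Use the src pad template caps as the output caps and negotiate them downstream */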
  outcaps =
      gst_caps_copy (gst_static_pad_template_get_caps
      (&gst_openh264enc_src_template));

  output_state = gst_video_encoder_set_output_state (encoder, outcaps, state);
  gst_video_codec_state_unref (output_state);

  return gst_video_encoder_negotiate (encoder);
}
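
All of the examples above follow the same negotiation pattern: build the output caps, pass them together with the input state to gst_video_encoder_set_output_state (), optionally attach codec_data or header buffers to the returned state, and finally call gst_video_encoder_negotiate (). The sketch below distills that pattern; GstFooEnc and foo_enc_build_output_caps () are hypothetical placeholders, not taken from any of the examples.

static gboolean
foo_enc_negotiate_output (GstFooEnc * self)
{
  GstVideoEncoder *venc = GST_VIDEO_ENCODER (self);
  GstCaps *caps;
  GstVideoCodecState *output_state;

  /* Build caps describing the encoded output (hypothetical helper) */
  caps = foo_enc_build_output_caps (self);
  if (!caps)
    return FALSE;

  /* set_output_state () takes ownership of caps; resolution, pixel-aspect-ratio
   * and framerate are copied from the reference input state */
  output_state = gst_video_encoder_set_output_state (venc, caps,
      self->input_state);

  /* codec_data or header buffers could be attached to output_state here */

  gst_video_codec_state_unref (output_state);

  /* Push the new caps downstream */
  return gst_video_encoder_negotiate (venc);
}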