Example #1
static void
gst_x265_enc_set_latency (GstX265Enc * encoder)
{
  GstVideoInfo *info = &encoder->input_state->info;

  if (info->fps_n) {
    GstClockTime latency;
    gint max_delayed_frames;

    /* FIXME get a real value from the encoder, this is currently not exposed */
    max_delayed_frames = 5;
    latency = gst_util_uint64_scale_ceil (GST_SECOND * info->fps_d,
        max_delayed_frames, info->fps_n);

    GST_INFO_OBJECT (encoder,
        "Updating latency to %" GST_TIME_FORMAT " (%d frames)",
        GST_TIME_ARGS (latency), max_delayed_frames);

    gst_video_encoder_set_latency (GST_VIDEO_ENCODER (encoder), latency,
        latency);
  } else {
    /* We can't do live as we don't know our latency */
    gst_video_encoder_set_latency (GST_VIDEO_ENCODER (encoder),
        0, GST_CLOCK_TIME_NONE);
  }
}
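A quick worked check of the latency math above (a standalone sketch, assuming a hypothetical test program; 30000/1001 fps and the 5-frame delay from the FIXME):

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstClockTime latency;

  gst_init (&argc, &argv);

  /* 5 frames at 30000/1001 fps: ceil (5 * 1001/30000 s), roughly 166.8 ms.
   * gst_util_uint64_scale_ceil() performs the scaling without overflowing
   * 64-bit arithmetic. */
  latency = gst_util_uint64_scale_ceil (GST_SECOND * 1001, 5, 30000);
  g_print ("latency: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (latency));

  return 0;
}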
Example #2
static gboolean
gst_amc_video_enc_set_src_caps (GstAmcVideoEnc * self, GstAmcFormat * format)
{
  GstCaps *caps;
  GstVideoCodecState *output_state;

  caps = caps_from_amc_format (format);
  if (!caps) {
    GST_ERROR_OBJECT (self, "Failed to create output caps");
    return FALSE;
  }

  /* It may not be correct to reference self->input_state here: MediaCodec
   * works asynchronously, so input_state may change several times and the
   * MediaFormat passed in is not necessarily the one that matches the
   * current input_state.
   *
   * Currently, however, the final src caps only derive
   * width/height/pixel-aspect-ratio/framerate/codec_data from
   * self->input_state.
   *
   * And if the input width/height/codec_data change (is_format_change),
   * MediaCodec is restarted, so in those cases self->input_state does match.
   */
  output_state = gst_video_encoder_set_output_state (GST_VIDEO_ENCODER (self),
      caps, self->input_state);
  gst_video_codec_state_unref (output_state);

  if (!gst_video_encoder_negotiate (GST_VIDEO_ENCODER (self)))
    return FALSE;

  return TRUE;
}
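The pairing above, gst_video_encoder_set_output_state() followed by gst_video_encoder_negotiate(), is the canonical pattern. A minimal sketch with a placeholder "video/x-mycodec" media type; set_output_state() takes ownership of the caps and returns a state the caller must unref:

static gboolean
my_enc_update_src_caps (GstVideoEncoder * encoder,
    GstVideoCodecState * input_state)
{
  GstVideoCodecState *output_state;

  /* set_output_state() consumes the caps reference */
  output_state = gst_video_encoder_set_output_state (encoder,
      gst_caps_new_empty_simple ("video/x-mycodec"), input_state);
  gst_video_codec_state_unref (output_state);

  return gst_video_encoder_negotiate (encoder);
}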
Example #3
/* gst_x265_enc_set_src_caps
 * Returns: TRUE on success.
 */
static gboolean
gst_x265_enc_set_src_caps (GstX265Enc * encoder, GstCaps * caps)
{
  GstCaps *outcaps;
  GstStructure *structure;
  GstVideoCodecState *state;
  GstTagList *tags;

  outcaps = gst_caps_new_empty_simple ("video/x-h265");
  structure = gst_caps_get_structure (outcaps, 0);

  gst_structure_set (structure, "stream-format", G_TYPE_STRING, "byte-stream",
      NULL);
  gst_structure_set (structure, "alignment", G_TYPE_STRING, "au", NULL);

  if (!gst_x265_enc_set_level_tier_and_profile (encoder, outcaps)) {
    gst_caps_unref (outcaps);
    return FALSE;
  }

  state = gst_video_encoder_set_output_state (GST_VIDEO_ENCODER (encoder),
      outcaps, encoder->input_state);
  GST_DEBUG_OBJECT (encoder, "output caps: %" GST_PTR_FORMAT, state->caps);
  gst_video_codec_state_unref (state);

  tags = gst_tag_list_new_empty ();
  gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_ENCODER, "x265",
      GST_TAG_ENCODER_VERSION, x265_version_str, NULL);
  gst_video_encoder_merge_tags (GST_VIDEO_ENCODER (encoder), tags,
      GST_TAG_MERGE_REPLACE);
  gst_tag_list_unref (tags);

  return TRUE;
}
Example #4
static GstFlowReturn
theora_push_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
    GstVideoEncoder *benc;
    GstFlowReturn ret;
    GstVideoCodecFrame *frame;

    benc = GST_VIDEO_ENCODER (enc);

    frame = gst_video_encoder_get_oldest_frame (benc);
    if (gst_video_encoder_allocate_output_frame (benc, frame,
            packet->bytes) != GST_FLOW_OK) {
        GST_WARNING_OBJECT (enc, "Could not allocate buffer");
        gst_video_codec_frame_unref (frame);
        ret = GST_FLOW_ERROR;
        goto done;
    }

    gst_buffer_fill (frame->output_buffer, 0, packet->packet, packet->bytes);

    /* the second most significant bit of the first data byte is cleared
     * for keyframes */
    if (packet->bytes > 0 && (packet->packet[0] & 0x40) == 0) {
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
    } else {
        GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);
    }
    enc->packetno++;

    ret = gst_video_encoder_finish_frame (benc, frame);

done:
    return ret;
}
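The 0x40 test above is a property of the Theora bitstream: a data packet whose first byte has bit 0x40 cleared is an intra (key) frame. As a standalone predicate (sketch):

static gboolean
theora_packet_is_keyframe (const ogg_packet * packet)
{
  /* bit 0x40 of the first data byte is cleared for keyframes */
  return packet->bytes > 0 && (packet->packet[0] & 0x40) == 0;
}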
Example #5
static void
gst_x265_enc_set_latency (GstX265Enc * encoder)
{
  GstVideoInfo *info = &encoder->input_state->info;
  gint max_delayed_frames;
  GstClockTime latency;

  /* FIXME get a real value from the encoder, this is currently not exposed */
  if (encoder->tune > 0 && encoder->tune <= G_N_ELEMENTS (x265_tune_names) &&
      strcmp (x265_tune_names[encoder->tune - 1], "zerolatency") == 0)
    max_delayed_frames = 0;
  else
    max_delayed_frames = 5;

  if (info->fps_n) {
    latency = gst_util_uint64_scale_ceil (GST_SECOND * info->fps_d,
        max_delayed_frames, info->fps_n);
  } else {
    /* FIXME: Assume 25fps. This is better than reporting no latency at
     * all and then later failing in live pipelines
     */
    latency = gst_util_uint64_scale_ceil (GST_SECOND * 1,
        max_delayed_frames, 25);
  }

  GST_INFO_OBJECT (encoder,
      "Updating latency to %" GST_TIME_FORMAT " (%d frames)",
      GST_TIME_ARGS (latency), max_delayed_frames);

  gst_video_encoder_set_latency (GST_VIDEO_ENCODER (encoder), latency, latency);
}
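The same logic can be factored into a helper (a hypothetical function, not part of the element), keeping the 25 fps fallback for unknown framerates; with 5 frames at the fallback rate this reports 200 ms:

static GstClockTime
frames_to_latency (gint frames, gint fps_n, gint fps_d)
{
  if (fps_n <= 0) {
    /* assume 25 fps rather than reporting no latency at all */
    fps_n = 25;
    fps_d = 1;
  }

  return gst_util_uint64_scale_ceil (GST_SECOND * fps_d, frames, fps_n);
}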
Example #6
static GstBuffer *
theora_enc_buffer_from_header_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
    GstBuffer *outbuf;

    outbuf =
        gst_video_encoder_allocate_output_buffer (GST_VIDEO_ENCODER (enc),
                packet->bytes);
    gst_buffer_fill (outbuf, 0, packet->packet, packet->bytes);
    GST_BUFFER_OFFSET (outbuf) = 0;
    GST_BUFFER_OFFSET_END (outbuf) = 0;
    GST_BUFFER_TIMESTAMP (outbuf) = GST_CLOCK_TIME_NONE;
    GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;

    GST_DEBUG ("created header packet buffer, %u bytes",
               (guint) gst_buffer_get_size (outbuf));
    return outbuf;
}
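The buffer allocation above is not checked. A variant of the same helper with the failure path handled (a sketch, on the assumption that gst_video_encoder_allocate_output_buffer() can in principle fail and return NULL):

static GstBuffer *
buffer_from_header_packet_checked (GstVideoEncoder * benc, ogg_packet * packet)
{
  GstBuffer *outbuf;

  outbuf = gst_video_encoder_allocate_output_buffer (benc, packet->bytes);
  if (outbuf == NULL)
    return NULL;                /* allocation failed */

  gst_buffer_fill (outbuf, 0, packet->packet, packet->bytes);
  return outbuf;
}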
Example #7
static GstFlowReturn
gst_av1_enc_process (GstAV1Enc * encoder)
{
  aom_codec_iter_t iter = NULL;
  const aom_codec_cx_pkt_t *pkt;
  GstVideoCodecFrame *frame;
  GstVideoEncoder *video_encoder;
  GstFlowReturn ret = GST_FLOW_CUSTOM_SUCCESS;

  video_encoder = GST_VIDEO_ENCODER (encoder);

  while ((pkt = aom_codec_get_cx_data (&encoder->encoder, &iter)) != NULL) {
    if (pkt->kind == AOM_CODEC_STATS_PKT) {
      GST_WARNING_OBJECT (encoder, "Unhandled stats packet");
    } else if (pkt->kind == AOM_CODEC_FPMB_STATS_PKT) {
      GST_WARNING_OBJECT (encoder, "Unhandled FPMB pkt");
    } else if (pkt->kind == AOM_CODEC_PSNR_PKT) {
      GST_WARNING_OBJECT (encoder, "Unhandled PSNR packet");
    } else if (pkt->kind == AOM_CODEC_CX_FRAME_PKT) {
      frame = gst_video_encoder_get_oldest_frame (video_encoder);
      g_assert (frame != NULL);
      if ((pkt->data.frame.flags & AOM_FRAME_IS_KEY) != 0) {
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
      } else {
        GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);
      }

      frame->output_buffer =
          gst_buffer_new_wrapped (g_memdup (pkt->data.frame.buf,
              pkt->data.frame.sz), pkt->data.frame.sz);

      if ((pkt->data.frame.flags & AOM_FRAME_IS_DROPPABLE) != 0)
        GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DROPPABLE);
      if ((pkt->data.frame.flags & AOM_FRAME_IS_INVISIBLE) != 0)
        GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DECODE_ONLY);

      ret = gst_video_encoder_finish_frame (video_encoder, frame);
      if (ret != GST_FLOW_OK)
        break;
    }
  }

  return ret;
}
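A portability note on the g_memdup() call above: GLib deprecated it in 2.68 in favour of g_memdup2(), which takes a gsize length and avoids a guint truncation. The same wrap on a current GLib might look like this (sketch, assuming GLib >= 2.68):

static GstBuffer *
wrap_encoded_packet (gconstpointer data, gsize size)
{
  /* copy the encoder-owned payload so the buffer can outlive the packet */
  return gst_buffer_new_wrapped (g_memdup2 (data, size), size);
}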
Example #8
static GstFlowReturn
gst_omx_h264_enc_handle_output_frame (GstOMXVideoEnc * enc, GstOMXPort * port,
    GstOMXBuffer * buf, GstVideoCodecFrame * frame)
{
  GstOMXH264Enc *self = GST_OMX_H264_ENC (enc);

  if (buf->omx_buf->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
    /* The codec data is SPS/PPS with a startcode => bytestream stream format
     * For bytestream stream format the SPS/PPS is only in-stream and not
     * in the caps!
     */
    if (buf->omx_buf->nFilledLen >= 4 &&
        GST_READ_UINT32_BE (buf->omx_buf->pBuffer +
            buf->omx_buf->nOffset) == 0x00000001) {
      GstBuffer *hdrs;
      GstMapInfo map = GST_MAP_INFO_INIT;

      GST_DEBUG_OBJECT (self, "got codecconfig in byte-stream format");

      hdrs = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

      gst_buffer_map (hdrs, &map, GST_MAP_WRITE);
      memcpy (map.data,
          buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
      gst_buffer_unmap (hdrs, &map);
      self->headers = g_list_append (self->headers, hdrs);

      if (frame)
        gst_video_codec_frame_unref (frame);

      return GST_FLOW_OK;
    }
  } else if (self->headers) {
    gst_video_encoder_set_headers (GST_VIDEO_ENCODER (self), self->headers);
    self->headers = NULL;
  }

  return
      GST_OMX_VIDEO_ENC_CLASS
      (gst_omx_h264_enc_parent_class)->handle_output_frame (enc, port, buf,
      frame);
}
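The start-code check above is what distinguishes byte-stream (Annex B) codec config from the caps-borne avc variant. As a standalone predicate (sketch):

static gboolean
is_bytestream_codec_config (const guint8 * data, gsize size)
{
  /* Annex B SPS/PPS blobs begin with the 4-byte start code 00 00 00 01 */
  return size >= 4 && GST_READ_UINT32_BE (data) == 0x00000001;
}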
Example #9
static gboolean
gst_webp_enc_set_format (GstVideoEncoder * encoder, GstVideoCodecState * state)
{
  GstWebpEnc *enc = GST_WEBP_ENC (encoder);
  GstVideoCodecState *output_state;
  GstVideoInfo *info;
  GstVideoFormat format;

  info = &state->info;
  format = GST_VIDEO_INFO_FORMAT (info);

  if (GST_VIDEO_INFO_IS_YUV (info)) {
    switch (format) {
      case GST_VIDEO_FORMAT_I420:
      case GST_VIDEO_FORMAT_YV12:
        enc->webp_color_space = WEBP_YUV420;
        break;
      default:
        break;
    }
  } else {
    if (GST_VIDEO_INFO_IS_RGB (info)) {
      enc->rgb_format = format;
      enc->use_argb = 1;
    }
  }

  if (enc->input_state)
    gst_video_codec_state_unref (enc->input_state);
  enc->input_state = gst_video_codec_state_ref (state);

  output_state =
      gst_video_encoder_set_output_state (GST_VIDEO_ENCODER (enc),
      gst_caps_new_empty_simple ("image/webp"), enc->input_state);
  gst_video_codec_state_unref (output_state);

  return TRUE;
}
Example #10
static void
gst_av1_enc_set_latency (GstAV1Enc * av1enc)
{
  GstClockTime latency;
  gint fps_n, fps_d;

  if (av1enc->input_state->info.fps_n && av1enc->input_state->info.fps_d) {
    fps_n = av1enc->input_state->info.fps_n;
    fps_d = av1enc->input_state->info.fps_d;
  } else {
    fps_n = 25;
    fps_d = 1;
  }

  latency =
      gst_util_uint64_scale (av1enc->aom_cfg.g_lag_in_frames * GST_SECOND,
      fps_d, fps_n);
  gst_video_encoder_set_latency (GST_VIDEO_ENCODER (av1enc), latency, latency);

  GST_DEBUG_OBJECT (av1enc, "Latency set to %" GST_TIME_FORMAT
      " = %d frames at %d/%d fps ", GST_TIME_ARGS (latency),
      av1enc->aom_cfg.g_lag_in_frames, fps_n, fps_d);
}
Example #11
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
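The header-flushing loop above relies on a common GLib idiom: g_list_prepend() is O(1) while g_list_append() is O(n), so the list is built backwards and reversed once at the end. In isolation (sketch):

static GList *
collect_in_order (gpointer first, gpointer second)
{
  GList *items = NULL;

  items = g_list_prepend (items, first);
  items = g_list_prepend (items, second);

  return g_list_reverse (items);        /* back to insertion order */
}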
Example #12
static GstFlowReturn
gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  gint ret_size;

  GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);

  /* no need to empty codec if there is none */
  if (!ffmpegenc->opened)
    goto done;

  while ((frame =
          gst_video_encoder_get_oldest_frame (GST_VIDEO_ENCODER (ffmpegenc)))) {

    ffmpegenc_setup_working_buf (ffmpegenc);

    ret_size = avcodec_encode_video (ffmpegenc->context,
        ffmpegenc->working_buf, ffmpegenc->working_buf_size, NULL);

    if (ret_size < 0) {         /* there should be something, notify and give up */
#ifndef GST_DISABLE_GST_DEBUG
      GstFFMpegVidEncClass *oclass =
          (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
      GST_WARNING_OBJECT (ffmpegenc,
          "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
      gst_video_codec_frame_unref (frame);
      break;
    }

    /* save stats info if there is some as well as a stats file */
    if (ffmpegenc->file && ffmpegenc->context->stats_out)
      if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
            (("Could not write to file \"%s\"."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);

    if (send) {
      if (gst_video_encoder_allocate_output_frame (GST_VIDEO_ENCODER
              (ffmpegenc), frame, ret_size) != GST_FLOW_OK) {
#ifndef GST_DISABLE_GST_DEBUG
        GstFFMpegVidEncClass *oclass =
            (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
        GST_WARNING_OBJECT (ffmpegenc,
            "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
        gst_video_codec_frame_unref (frame);
        break;
      }
      outbuf = frame->output_buffer;
      gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);

      if (ffmpegenc->context->coded_frame->key_frame)
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);

      flow_ret =
          gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
    } else {
      gst_video_codec_frame_unref (frame);
    }
  }

done:

  return flow_ret;
}
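avcodec_encode_video() has long been removed from FFmpeg; current libavcodec drains an encoder through the send/receive API instead. A minimal flush sketch, assuming an opened AVCodecContext *ctx (this is not the API the example above uses):

#include <libavcodec/avcodec.h>

static int
flush_encoder (AVCodecContext * ctx)
{
  AVPacket *pkt = av_packet_alloc ();
  int ret;

  avcodec_send_frame (ctx, NULL);       /* a NULL frame enters draining mode */
  while ((ret = avcodec_receive_packet (ctx, pkt)) == 0) {
    /* ... push pkt downstream here ... */
    av_packet_unref (pkt);
  }
  av_packet_free (&pkt);

  return ret == AVERROR_EOF ? 0 : ret;  /* AVERROR_EOF means fully drained */
}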
Example #13
static gboolean gst_imx_vpu_h264_enc_set_open_params(GstImxVpuBaseEnc *vpu_base_enc, VpuEncOpenParam *open_param)
{
	GstCaps *template_caps, *allowed_caps;
	GstImxVpuH264Enc *enc = GST_IMX_VPU_H264_ENC(vpu_base_enc);

	open_param->eFormat = VPU_V_AVC;
	open_param->eColorFormat = VPU_COLOR_420;

	/* These are default settings from VPU_EncOpenSimp */
	open_param->VpuEncStdParam.avcParam.avc_constrainedIntraPredFlag = 0;
	open_param->VpuEncStdParam.avcParam.avc_disableDeblk = 0;
	open_param->VpuEncStdParam.avcParam.avc_deblkFilterOffsetAlpha = 6;
	open_param->VpuEncStdParam.avcParam.avc_deblkFilterOffsetBeta = 0;
	open_param->VpuEncStdParam.avcParam.avc_chromaQpOffset = 10;
	open_param->VpuEncStdParam.avcParam.avc_audEnable = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoEnable = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoType = 0;
	open_param->VpuEncStdParam.avcParam.avc_fmoSliceNum = 1;
	open_param->VpuEncStdParam.avcParam.avc_fmoSliceSaveBufSize = 32;

	/* Since this call is part of set_format, it is a suitable place for looking up whether or not
	 * downstream requires access units. The src caps are retrieved and examined for this purpose. */

	template_caps = gst_static_pad_template_get_caps(&static_src_template);
	allowed_caps = gst_pad_get_allowed_caps(GST_VIDEO_ENCODER_SRC_PAD(GST_VIDEO_ENCODER(vpu_base_enc)));

	if (allowed_caps == template_caps)
	{
		enc->produce_access_units = TRUE;
	}
	else if (allowed_caps != NULL)
	{
		GstStructure *s;
		gchar const *alignment_str;

		if (gst_caps_is_empty(allowed_caps))
		{
			GST_ERROR_OBJECT(enc, "src caps are empty");
			gst_caps_unref(allowed_caps);
			return FALSE;
		}

		allowed_caps = gst_caps_make_writable(allowed_caps);
		allowed_caps = gst_caps_fixate(allowed_caps);
		s = gst_caps_get_structure(allowed_caps, 0);

		alignment_str = gst_structure_get_string(s, "alignment");
		enc->produce_access_units = !g_strcmp0(alignment_str, "au");

		gst_caps_unref(allowed_caps);
	}

	if (enc->produce_access_units)
		open_param->VpuEncStdParam.avcParam.avc_audEnable = 1;

	GST_INFO_OBJECT(vpu_base_enc, "produce h.264 access units: %s", enc->produce_access_units ? "yes" : "no");

	gst_caps_unref(template_caps);

	return TRUE;
}
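The caps probing above generalizes into a small helper; a sketch (hypothetical downstream_prefers(), leaving out the template-caps shortcut the example takes first):

static gboolean
downstream_prefers (GstVideoEncoder * enc, const gchar * field,
    const gchar * value)
{
  GstCaps *caps;
  gboolean res = FALSE;

  caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (enc));
  if (caps == NULL)
    return FALSE;

  if (!gst_caps_is_empty (caps)) {
    GstStructure *s;

    /* fixate to a single structure before reading the field */
    caps = gst_caps_make_writable (caps);
    caps = gst_caps_fixate (caps);
    s = gst_caps_get_structure (caps, 0);
    res = !g_strcmp0 (gst_structure_get_string (s, field), value);
  }

  gst_caps_unref (caps);
  return res;
}

With it, the alignment lookup in the example reduces to downstream_prefers (enc, "alignment", "au").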
Example #14
static void
gst_vp8_enc_set_stream_info (GstVPXEnc * enc, GstCaps * caps,
    GstVideoInfo * info)
{
  GstStructure *s;
  GstVideoEncoder *video_encoder;
  GstBuffer *stream_hdr, *vorbiscomment;
  const GstTagList *iface_tags;
  GValue array = { 0, };
  GValue value = { 0, };
  guint8 *data = NULL;
  GstMapInfo map;

  video_encoder = GST_VIDEO_ENCODER (enc);
  s = gst_caps_get_structure (caps, 0);

  /* put buffers in a fixed list */
  g_value_init (&array, GST_TYPE_ARRAY);
  g_value_init (&value, GST_TYPE_BUFFER);

  /* Create Ogg stream-info */
  stream_hdr = gst_buffer_new_and_alloc (26);
  gst_buffer_map (stream_hdr, &map, GST_MAP_WRITE);
  data = map.data;

  GST_WRITE_UINT8 (data, 0x4F);
  GST_WRITE_UINT32_BE (data + 1, 0x56503830);   /* "VP80" */
  GST_WRITE_UINT8 (data + 5, 0x01);     /* stream info header */
  GST_WRITE_UINT8 (data + 6, 1);        /* Major version 1 */
  GST_WRITE_UINT8 (data + 7, 0);        /* Minor version 0 */
  GST_WRITE_UINT16_BE (data + 8, GST_VIDEO_INFO_WIDTH (info));
  GST_WRITE_UINT16_BE (data + 10, GST_VIDEO_INFO_HEIGHT (info));
  GST_WRITE_UINT24_BE (data + 12, GST_VIDEO_INFO_PAR_N (info));
  GST_WRITE_UINT24_BE (data + 15, GST_VIDEO_INFO_PAR_D (info));
  GST_WRITE_UINT32_BE (data + 18, GST_VIDEO_INFO_FPS_N (info));
  GST_WRITE_UINT32_BE (data + 22, GST_VIDEO_INFO_FPS_D (info));

  gst_buffer_unmap (stream_hdr, &map);

  GST_BUFFER_FLAG_SET (stream_hdr, GST_BUFFER_FLAG_HEADER);
  gst_value_set_buffer (&value, stream_hdr);
  gst_value_array_append_value (&array, &value);
  g_value_unset (&value);
  gst_buffer_unref (stream_hdr);

  iface_tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (video_encoder));
  if (iface_tags) {
    vorbiscomment =
        gst_tag_list_to_vorbiscomment_buffer (iface_tags,
        (const guint8 *) "OVP80\2 ", 7,
        "Encoded with GStreamer vp8enc " PACKAGE_VERSION);

    GST_BUFFER_FLAG_SET (vorbiscomment, GST_BUFFER_FLAG_HEADER);

    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, vorbiscomment);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
    gst_buffer_unref (vorbiscomment);
  }

  gst_structure_set_value (s, "streamheader", &array);
  g_value_unset (&array);

}
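For reference, the 26-byte stream-info written above can be read back with the matching GST_READ_* macros. A decoding sketch (assumes data points at at least 26 mapped bytes):

static void
parse_vp8_stream_info (const guint8 * data)
{
  guint width = GST_READ_UINT16_BE (data + 8);
  guint height = GST_READ_UINT16_BE (data + 10);
  guint fps_n = GST_READ_UINT32_BE (data + 18);
  guint fps_d = GST_READ_UINT32_BE (data + 22);

  g_print ("VP80 stream info: %ux%u @ %u/%u fps\n", width, height,
      fps_n, fps_d);
}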
Example #15
static GstFlowReturn
gst_x265_enc_encode_frame (GstX265Enc * encoder, x265_picture * pic_in,
    GstVideoCodecFrame * input_frame, guint32 * i_nal, gboolean send)
{
  GstVideoCodecFrame *frame = NULL;
  GstBuffer *out_buf = NULL;
  x265_picture pic_out;
  x265_nal *nal;
  int i_size, i, offset;
  int encoder_return;
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean update_latency = FALSE;

  if (G_UNLIKELY (encoder->x265enc == NULL)) {
    if (input_frame)
      gst_video_codec_frame_unref (input_frame);
    return GST_FLOW_NOT_NEGOTIATED;
  }

  GST_OBJECT_LOCK (encoder);
  if (encoder->reconfig) {
    /* x265_encoder_reconfig is not yet implemented, so shut down and re-create the encoder */
    gst_x265_enc_init_encoder (encoder);
    update_latency = TRUE;
  }

  if (pic_in && input_frame) {
    if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (input_frame)) {
      GST_INFO_OBJECT (encoder, "Forcing key frame");
      pic_in->sliceType = X265_TYPE_IDR;
    }
  }
  GST_OBJECT_UNLOCK (encoder);

  if (G_UNLIKELY (update_latency))
    gst_x265_enc_set_latency (encoder);

  encoder_return = x265_encoder_encode (encoder->x265enc,
      &nal, i_nal, pic_in, &pic_out);

  GST_DEBUG_OBJECT (encoder, "encoder result (%d) with %u nal units",
      encoder_return, *i_nal);

  if (encoder_return < 0) {
    GST_ELEMENT_ERROR (encoder, STREAM, ENCODE, ("Encode x265 frame failed."),
        ("x265_encoder_encode return code=%d", encoder_return));
    ret = GST_FLOW_ERROR;
    /* Make sure we finish this frame */
    frame = input_frame;
    goto out;
  }

  /* Input frame is now queued */
  if (input_frame)
    gst_video_codec_frame_unref (input_frame);

  if (!*i_nal) {
    ret = GST_FLOW_OK;
    GST_LOG_OBJECT (encoder, "no output yet");
    goto out;
  }

  frame = gst_video_encoder_get_frame (GST_VIDEO_ENCODER (encoder),
      GPOINTER_TO_INT (pic_out.userData));
  g_assert (frame || !send);

  GST_DEBUG_OBJECT (encoder,
      "output picture ready POC=%d system=%d frame found %d", pic_out.poc,
      GPOINTER_TO_INT (pic_out.userData), frame != NULL);

  if (!send || !frame) {
    GST_LOG_OBJECT (encoder, "not sending (%d) or frame not found (%d)", send,
        frame != NULL);
    ret = GST_FLOW_OK;
    goto out;
  }

  i_size = 0;
  offset = 0;
  for (i = 0; i < *i_nal; i++)
    i_size += nal[i].sizeBytes;
  out_buf = gst_buffer_new_allocate (NULL, i_size, NULL);
  for (i = 0; i < *i_nal; i++) {
    gst_buffer_fill (out_buf, offset, nal[i].payload, nal[i].sizeBytes);
    offset += nal[i].sizeBytes;
  }

  frame->output_buffer = out_buf;

  if (encoder->push_header) {
    GstBuffer *header;

    header = gst_x265_enc_get_header_buffer (encoder);
    frame->output_buffer = gst_buffer_append (header, frame->output_buffer);
    encoder->push_header = FALSE;
  }

  GST_LOG_OBJECT (encoder,
      "output: dts %" G_GINT64_FORMAT " pts %" G_GINT64_FORMAT,
      (gint64) pic_out.dts, (gint64) pic_out.pts);

  frame->dts = pic_out.dts + encoder->dts_offset;

out:
  if (frame) {
    gst_x265_enc_dequeue_frame (encoder, frame);
    ret = gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (encoder), frame);
  }

  return ret;
}
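One ownership detail in the header path above: gst_buffer_append() consumes both of its arguments and returns the concatenated buffer, which is why frame->output_buffer is simply reassigned without unreffing anything. Spelled out (sketch):

static GstBuffer *
prepend_header (GstBuffer * header, GstBuffer * payload)
{
  /* takes ownership of header and payload; caller owns only the result */
  return gst_buffer_append (header, payload);
}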
Example #16
static gboolean
gst_amc_video_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstAmcVideoEnc *self;
  GstAmcFormat *format = NULL;
  GstCaps *allowed_caps = NULL;
  gboolean is_format_change = FALSE;
  gboolean needs_disable = FALSE;
  gchar *format_string;
  gboolean r = FALSE;
  GError *err = NULL;

  self = GST_AMC_VIDEO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Setting new caps %" GST_PTR_FORMAT, state->caps);

  /* Check if the caps change is a real format change or if only irrelevant
   * parts of the caps have changed or nothing at all.
   */
  is_format_change |= self->color_format_info.width != state->info.width;
  is_format_change |= self->color_format_info.height != state->info.height;
  needs_disable = self->started;

  /* If the component is started and a real format change happens
   * we have to restart the component. If no real format change
   * happened we can just exit here.
   */
  if (needs_disable && !is_format_change) {

    /* Framerate or something minor changed */
    if (self->input_state)
      gst_video_codec_state_unref (self->input_state);
    self->input_state = gst_video_codec_state_ref (state);
    GST_DEBUG_OBJECT (self,
        "Already running and caps did not change the format");
    return TRUE;
  }

  if (needs_disable && is_format_change) {
    gst_amc_video_enc_drain (self);
    GST_VIDEO_ENCODER_STREAM_UNLOCK (self);
    gst_amc_video_enc_stop (GST_VIDEO_ENCODER (self));
    GST_VIDEO_ENCODER_STREAM_LOCK (self);
    gst_amc_video_enc_close (GST_VIDEO_ENCODER (self));
    if (!gst_amc_video_enc_open (GST_VIDEO_ENCODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to open codec again");
      return FALSE;
    }

    if (!gst_amc_video_enc_start (GST_VIDEO_ENCODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to start codec again");
    }
  }
  /* srcpad task is not running at this point */
  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = NULL;

  GST_DEBUG_OBJECT (self, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (self, "... but no peer, using template caps");
    allowed_caps =
        gst_pad_get_pad_template_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  }
  GST_DEBUG_OBJECT (self, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  allowed_caps = gst_caps_truncate (allowed_caps);

  format = create_amc_format (self, state, allowed_caps);
  if (!format)
    goto quit;

  format_string = gst_amc_format_to_string (format, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (self, err);
  GST_DEBUG_OBJECT (self, "Configuring codec with format: %s",
      GST_STR_NULL (format_string));
  g_free (format_string);

  if (!gst_amc_codec_configure (self->codec, format, 1, &err)) {
    GST_ERROR_OBJECT (self, "Failed to configure codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    goto quit;
  }

  if (!gst_amc_codec_start (self->codec, &err)) {
    GST_ERROR_OBJECT (self, "Failed to start codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    goto quit;
  }

  self->amc_format = format;
  format = NULL;

  self->input_state = gst_video_codec_state_ref (state);

  self->started = TRUE;

  /* Start the srcpad loop again */
  self->flushing = FALSE;
  self->downstream_flow_ret = GST_FLOW_OK;
  gst_pad_start_task (GST_VIDEO_ENCODER_SRC_PAD (self),
      (GstTaskFunction) gst_amc_video_enc_loop, encoder, NULL);

  r = TRUE;

quit:
  if (allowed_caps)
    gst_object_unref (allowed_caps);

  if (format)
    gst_amc_format_free (format);

  return r;
}
Example #17
static GstVideoCodecFrame *
_find_nearest_frame (GstAmcVideoEnc * self, GstClockTime reference_timestamp)
{
  GList *l, *best_l = NULL;
  GList *finish_frames = NULL;
  GstVideoCodecFrame *best = NULL;
  guint64 best_timestamp = 0;
  guint64 best_diff = G_MAXUINT64;
  BufferIdentification *best_id = NULL;
  GList *frames;

  frames = gst_video_encoder_get_frames (GST_VIDEO_ENCODER (self));

  for (l = frames; l; l = l->next) {
    GstVideoCodecFrame *tmp = l->data;
    BufferIdentification *id = gst_video_codec_frame_get_user_data (tmp);
    guint64 timestamp, diff;

    /* This happens for frames that were just added but
     * which were not passed to the component yet. Ignore
     * them here!
     */
    if (!id)
      continue;

    timestamp = id->timestamp;

    if (timestamp > reference_timestamp)
      diff = timestamp - reference_timestamp;
    else
      diff = reference_timestamp - timestamp;

    if (best == NULL || diff < best_diff) {
      best = tmp;
      best_timestamp = timestamp;
      best_diff = diff;
      best_l = l;
      best_id = id;

      /* For frames without timestamp we simply take the first frame */
      if ((reference_timestamp == 0 && timestamp == 0) || diff == 0)
        break;
    }
  }

  if (best_id) {
    for (l = frames; l && l != best_l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;
      BufferIdentification *id = gst_video_codec_frame_get_user_data (tmp);
      guint64 diff_time, diff_frames;

      if (id->timestamp > best_timestamp)
        break;

      if (id->timestamp == 0 || best_timestamp == 0)
        diff_time = 0;
      else
        diff_time = best_timestamp - id->timestamp;
      diff_frames = best->system_frame_number - tmp->system_frame_number;

      if (diff_time > MAX_FRAME_DIST_TIME
          || diff_frames > MAX_FRAME_DIST_FRAMES) {
        finish_frames =
            g_list_prepend (finish_frames, gst_video_codec_frame_ref (tmp));
      }
    }
  }

  if (finish_frames) {
    g_warning ("%s: Too old frames, bug in encoder -- please file a bug",
        GST_ELEMENT_NAME (self));
    for (l = finish_frames; l; l = l->next) {
      gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (self), l->data);
    }
  }

  if (best)
    gst_video_codec_frame_ref (best);

  g_list_foreach (frames, (GFunc) gst_video_codec_frame_unref, NULL);
  g_list_free (frames);

  return best;
}
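The two-branch diff above is the standard way to take the absolute difference of unsigned timestamps; subtracting the larger from the smaller would wrap around. In isolation (sketch):

static guint64
timestamp_abs_diff (guint64 a, guint64 b)
{
  /* branch instead of subtracting blindly so nothing underflows */
  return (a > b) ? a - b : b - a;
}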
Example #18
static gboolean
gst_openjpeg_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenJPEGEnc *self = GST_OPENJPEG_ENC (encoder);
  GstCaps *allowed_caps, *caps;
  GstStructure *s;
  const gchar *colorspace;
  gint ncomps;

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = gst_video_codec_state_ref (state);

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  allowed_caps = gst_caps_truncate (allowed_caps);
  s = gst_caps_get_structure (allowed_caps, 0);
  if (gst_structure_has_name (s, "image/jp2")) {
    self->codec_format = OPJ_CODEC_JP2;
    self->is_jp2c = FALSE;
  } else if (gst_structure_has_name (s, "image/x-j2c")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = TRUE;
  } else if (gst_structure_has_name (s, "image/x-jpc")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = FALSE;
  } else {
    g_return_val_if_reached (FALSE);
  }

  switch (state->info.finfo->format) {
    case GST_VIDEO_FORMAT_ARGB64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_ARGB:
      self->fill_image = fill_image_packed8_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_xRGB:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_Y444_10LE:
    case GST_VIDEO_FORMAT_Y444_10BE:
    case GST_VIDEO_FORMAT_I422_10LE:
    case GST_VIDEO_FORMAT_I422_10BE:
    case GST_VIDEO_FORMAT_I420_10LE:
    case GST_VIDEO_FORMAT_I420_10BE:
      self->fill_image = fill_image_planar16_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_YUV9:
      self->fill_image = fill_image_planar8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_GRAY8:
      self->fill_image = fill_image_planar8_1;
      ncomps = 1;
      break;
    case GST_VIDEO_FORMAT_GRAY16_LE:
    case GST_VIDEO_FORMAT_GRAY16_BE:
      self->fill_image = fill_image_planar16_1;
      ncomps = 1;
      break;
    default:
      g_assert_not_reached ();
  }

  if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV))
    colorspace = "sYUV";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB))
    colorspace = "sRGB";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_GRAY))
    colorspace = "GRAY";
  else
    g_return_val_if_reached (FALSE);

  caps = gst_caps_new_simple (gst_structure_get_name (s),
      "colorspace", G_TYPE_STRING, colorspace,
      "num-components", G_TYPE_INT, ncomps, NULL);
  gst_caps_unref (allowed_caps);

  if (self->output_state)
    gst_video_codec_state_unref (self->output_state);
  self->output_state =
      gst_video_encoder_set_output_state (encoder, caps, state);

  gst_video_encoder_negotiate (GST_VIDEO_ENCODER (encoder));

  return TRUE;
}
Example #19
static GstFlowReturn
gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
{
    GstVideoCodecFrame *frame;
    GstFlowReturn flow_ret = GST_FLOW_OK;
    GstBuffer *outbuf;
    gint ret;
    AVPacket *pkt;
    int have_data = 0;

    GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);

    /* no need to empty codec if there is none */
    if (!ffmpegenc->opened)
        goto done;

    while ((frame =
                gst_video_encoder_get_oldest_frame (GST_VIDEO_ENCODER (ffmpegenc)))) {
        pkt = g_slice_new0 (AVPacket);
        have_data = 0;

        ret = avcodec_encode_video2 (ffmpegenc->context, pkt, NULL, &have_data);

        if (ret < 0) {              /* there should be something, notify and give up */
#ifndef GST_DISABLE_GST_DEBUG
            GstFFMpegVidEncClass *oclass =
                (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
            GST_WARNING_OBJECT (ffmpegenc,
                                "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
            g_slice_free (AVPacket, pkt);
            gst_video_codec_frame_unref (frame);
            break;
        }

        /* save stats info if there is some as well as a stats file */
        if (ffmpegenc->file && ffmpegenc->context->stats_out)
            if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
                GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                                   (("Could not write to file \"%s\"."), ffmpegenc->filename),
                                   GST_ERROR_SYSTEM);

        if (send && have_data) {
            outbuf =
                gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                             pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
            frame->output_buffer = outbuf;

            if (pkt->flags & AV_PKT_FLAG_KEY)
                GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
            else
                GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

            flow_ret =
                gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        } else {
            /* no frame attached, so will be skipped and removed from frame list */
            gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        }
    }

done:

    return flow_ret;
}
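The zero-copy wrap above hands the AVPacket payload to GStreamer without a memcpy; the destroy notify releases the packet once the buffer is freed. gst_ffmpegvidenc_free_avpacket itself is not shown here; a plausible sketch of such a notify (an assumption, matching the g_slice_new0() allocation above):

static void
free_avpacket (gpointer pkt)
{
  av_packet_unref ((AVPacket *) pkt);   /* drop the packet's payload ref */
  g_slice_free (AVPacket, pkt);         /* matches g_slice_new0 () above */
}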