Example #1
static gboolean
theora_enc_src_event (GstPad * pad, GstEvent * event)
{
  GstTheoraEnc *enc;
  gboolean res = TRUE;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CUSTOM_UPSTREAM:
    {
      const GstStructure *s;

      s = gst_event_get_structure (event);

      if (gst_structure_has_name (s, "GstForceKeyUnit")) {
        GST_OBJECT_LOCK (enc);
        enc->force_keyframe = TRUE;
        GST_OBJECT_UNLOCK (enc);
        /* consume the event */
        res = TRUE;
        gst_event_unref (event);
      } else {
        res = gst_pad_push_event (enc->sinkpad, event);
      }
      break;
    }
    default:
      res = gst_pad_push_event (enc->sinkpad, event);
      break;
  }

  return res;
}
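The handler above consumes a custom upstream "GstForceKeyUnit" event and simply flags the encoder to emit a keyframe on the next frame. As a rough sketch only (the helper name and the use of gst_element_send_event() are assumptions, not part of this element), an application could trigger that path like this:

/* Hypothetical helper: request a keyframe from a theoraenc instance by
 * sending the custom upstream event that theora_enc_src_event() consumes. */
static gboolean
request_theora_keyframe (GstElement * theoraenc)
{
  GstStructure *s;

  /* the handler above only checks the structure name, no fields are needed */
  s = gst_structure_new ("GstForceKeyUnit", NULL);

  return gst_element_send_event (theoraenc,
      gst_event_new_custom (GST_EVENT_CUSTOM_UPSTREAM, s));
}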
static void
theora_enc_get_property (GObject * object, guint prop_id,
                         GValue * value, GParamSpec * pspec)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (object);

    switch (prop_id) {
    case PROP_BITRATE:
        GST_OBJECT_LOCK (enc);
        g_value_set_int (value, enc->video_bitrate / 1000);
        GST_OBJECT_UNLOCK (enc);
        break;
    case PROP_QUALITY:
        GST_OBJECT_LOCK (enc);
        g_value_set_int (value, enc->video_quality);
        GST_OBJECT_UNLOCK (enc);
        break;
    case PROP_KEYFRAME_AUTO:
        g_value_set_boolean (value, enc->keyframe_auto);
        break;
    case PROP_KEYFRAME_FREQ:
        g_value_set_int (value, enc->keyframe_freq);
        break;
    case PROP_KEYFRAME_FREQ_FORCE:
        g_value_set_int (value, enc->keyframe_force);
        break;
    case PROP_SPEEDLEVEL:
        g_value_set_int (value, enc->speed_level);
        break;
    case PROP_VP3_COMPATIBLE:
        g_value_set_boolean (value, enc->vp3_compatible);
        break;
    case PROP_DROP_FRAMES:
        g_value_set_boolean (value, enc->drop_frames);
        break;
    case PROP_CAP_OVERFLOW:
        g_value_set_boolean (value, enc->cap_overflow);
        break;
    case PROP_CAP_UNDERFLOW:
        g_value_set_boolean (value, enc->cap_underflow);
        break;
    case PROP_RATE_BUFFER:
        g_value_set_int (value, enc->rate_buffer);
        break;
    case PROP_MULTIPASS_CACHE_FILE:
        g_value_set_string (value, enc->multipass_cache_file);
        break;
    case PROP_MULTIPASS_MODE:
        g_value_set_enum (value, enc->multipass_mode);
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
        break;
    }
}
static gboolean
theora_enc_set_format (GstVideoEncoder * benc, GstVideoCodecState * state)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (benc);
    GstVideoInfo *info = &state->info;

    enc->width = GST_VIDEO_INFO_WIDTH (info);
    enc->height = GST_VIDEO_INFO_HEIGHT (info);

    th_info_clear (&enc->info);
    th_info_init (&enc->info);
    /* Theora has a divisible-by-sixteen restriction for the encoded video size but
     * we can define a picture area using pic_width/pic_height */
    enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
    enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
    enc->info.pic_width = enc->width;
    enc->info.pic_height = enc->height;
    switch (GST_VIDEO_INFO_FORMAT (info)) {
    case GST_VIDEO_FORMAT_I420:
        enc->info.pixel_fmt = TH_PF_420;
        break;
    case GST_VIDEO_FORMAT_Y42B:
        enc->info.pixel_fmt = TH_PF_422;
        break;
    case GST_VIDEO_FORMAT_Y444:
        enc->info.pixel_fmt = TH_PF_444;
        break;
    default:
        g_assert_not_reached ();
    }

    enc->info.fps_numerator = enc->fps_n = GST_VIDEO_INFO_FPS_N (info);
    enc->info.fps_denominator = enc->fps_d = GST_VIDEO_INFO_FPS_D (info);
    enc->info.aspect_numerator = GST_VIDEO_INFO_PAR_N (info);
    enc->info.aspect_denominator = GST_VIDEO_INFO_PAR_D (info);

    enc->info.colorspace = TH_CS_UNSPECIFIED;

    /* Save input state */
    if (enc->input_state)
        gst_video_codec_state_unref (enc->input_state);
    enc->input_state = gst_video_codec_state_ref (state);

    /* as done in theora */
    enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
    GST_DEBUG_OBJECT (enc,
                      "keyframe_frequency_force is %d, granule shift is %d",
                      enc->keyframe_force, enc->info.keyframe_granule_shift);

    theora_enc_reset (enc);
    enc->initialised = TRUE;

    return TRUE;
}
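theora_enc_set_format() derives the keyframe granule shift with an _ilog() helper that is not included in these examples. A minimal sketch of such a helper, returning the number of bits needed to represent its argument (the convention libtheora uses), could be:

/* Sketch of the _ilog() helper used above: number of bits needed to
 * represent v, i.e. floor(log2(v)) + 1, and 0 for v == 0. */
static int
_ilog (unsigned int v)
{
  int ret = 0;

  while (v) {
    ret++;
    v >>= 1;
  }
  return ret;
}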
static void
theora_enc_finalize (GObject * object)
{
  GstTheoraEnc *enc = GST_THEORA_ENC (object);

  GST_DEBUG_OBJECT (enc, "Finalizing");
  theora_clear (&enc->state);
  theora_comment_clear (&enc->comment);
  theora_info_clear (&enc->info);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
static void
theora_enc_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstTheoraEnc *enc = GST_THEORA_ENC (object);

  switch (prop_id) {
    case ARG_CENTER:
      enc->center = g_value_get_boolean (value);
      break;
    case ARG_BORDER:
      enc->border = g_value_get_enum (value);
      break;
    case ARG_BITRATE:
      enc->video_bitrate = g_value_get_int (value) * 1000;
      enc->video_quality = 0;
      break;
    case ARG_QUALITY:
      enc->video_quality = g_value_get_int (value);
      enc->video_bitrate = 0;
      break;
    case ARG_QUICK:
      enc->quick = g_value_get_boolean (value);
      break;
    case ARG_KEYFRAME_AUTO:
      enc->keyframe_auto = g_value_get_boolean (value);
      break;
    case ARG_KEYFRAME_FREQ:
      enc->keyframe_freq = g_value_get_int (value);
      break;
    case ARG_KEYFRAME_FREQ_FORCE:
      enc->keyframe_force = g_value_get_int (value);
      break;
    case ARG_KEYFRAME_THRESHOLD:
      enc->keyframe_threshold = g_value_get_int (value);
      break;
    case ARG_KEYFRAME_MINDISTANCE:
      enc->keyframe_mindistance = g_value_get_int (value);
      break;
    case ARG_NOISE_SENSITIVITY:
      enc->noise_sensitivity = g_value_get_int (value);
      break;
    case ARG_SHARPNESS:
      enc->sharpness = g_value_get_int (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
Example #6
static void
theora_enc_finalize (GObject * object)
{
  GstTheoraEnc *enc = GST_THEORA_ENC (object);

  GST_DEBUG_OBJECT (enc, "Finalizing");
  if (enc->encoder)
    th_encode_free (enc->encoder);
  th_comment_clear (&enc->comment);
  th_info_clear (&enc->info);
  g_free (enc->multipass_cache_file);

  theora_enc_clear_multipass_cache (enc);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
static GstStateChangeReturn
theora_enc_change_state (GstElement * element, GstStateChange transition)
{
  GstTheoraEnc *enc;
  GstStateChangeReturn ret;

  enc = GST_THEORA_ENC (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_DEBUG_OBJECT (enc, "READY->PAUSED Initing theora state");
      theora_info_init (&enc->info);
      theora_comment_init (&enc->comment);
      enc->packetno = 0;
      break;
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
      break;
    default:
      break;
  }

  ret = parent_class->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
      break;
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      GST_DEBUG_OBJECT (enc, "PAUSED->READY Clearing theora state");
      theora_clear (&enc->state);
      theora_comment_clear (&enc->comment);
      theora_info_clear (&enc->info);

      theora_enc_clear (enc);
      enc->initialised = FALSE;
      break;
    case GST_STATE_CHANGE_READY_TO_NULL:
      break;
    default:
      break;
  }

  return ret;
}
static gboolean
theora_enc_stop (GstVideoEncoder * benc)
{
    GstTheoraEnc *enc;

    GST_DEBUG_OBJECT (benc, "stop: clearing theora state");
    enc = GST_THEORA_ENC (benc);

    if (enc->encoder) {
        th_encode_free (enc->encoder);
        enc->encoder = NULL;
    }
    th_comment_clear (&enc->comment);
    th_info_clear (&enc->info);

    enc->initialised = FALSE;

    return TRUE;
}
static GstFlowReturn
theora_enc_pre_push (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (benc);
    guint64 pfn;

    /* see ext/ogg/README; OFFSET_END takes "our" granulepos, OFFSET its
     * time representation */
    /* granulepos from sync frame */
    pfn = frame->presentation_frame_number - frame->distance_from_sync;
    /* correct to correspond to linear running time */
    pfn -= enc->pfn_offset;
    pfn += enc->granulepos_offset + 1;
    /* granulepos */
    GST_BUFFER_OFFSET_END (frame->output_buffer) =
        (pfn << enc->info.keyframe_granule_shift) + frame->distance_from_sync;
    GST_BUFFER_OFFSET (frame->output_buffer) = granulepos_to_timestamp (enc,
            GST_BUFFER_OFFSET_END (frame->output_buffer));

    return GST_FLOW_OK;
}
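The granulepos computed above packs two fields: the frame number of the last keyframe in the upper bits (shifted left by keyframe_granule_shift) and the distance from that keyframe in the lower bits. For instance, with a granule shift of 6, the third frame after a keyframe at frame 40 gets granulepos (40 << 6) + 3 = 2563. The inverse split, shown here only as an illustration (the helper is not part of the element), looks like this:

/* Illustrative: decompose a Theora granulepos into its two fields. */
static void
granulepos_split (gint64 granulepos, gint granule_shift,
    gint64 * keyframe_number, gint64 * frames_since_keyframe)
{
  *keyframe_number = granulepos >> granule_shift;
  *frames_since_keyframe = granulepos - (*keyframe_number << granule_shift);
}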
static gboolean
theora_enc_finish (GstVideoEncoder * benc)
{
    GstTheoraEnc *enc;
    ogg_packet op;

    enc = GST_THEORA_ENC (benc);

    if (enc->initialised) {
        /* push last packet with eos flag, should not be called */
        while (th_encode_packetout (enc->encoder, 1, &op)) {
            theora_push_packet (enc, &op);
        }
    }
    if (enc->initialised && enc->multipass_cache_fd
            && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS)
        theora_enc_write_multipass_cache (enc, TRUE, TRUE);

    theora_enc_clear_multipass_cache (enc);

    return TRUE;
}
static gboolean
theora_enc_start (GstVideoEncoder * benc)
{
    GstTheoraEnc *enc;

    GST_DEBUG_OBJECT (benc, "start: init theora");
    enc = GST_THEORA_ENC (benc);

    th_info_init (&enc->info);
    th_comment_init (&enc->comment);
    enc->packetno = 0;

    if (enc->multipass_mode >= MULTIPASS_MODE_FIRST_PASS) {
        GError *err = NULL;

        if (!enc->multipass_cache_file) {
            GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
            return FALSE;
        }
        enc->multipass_cache_fd =
            g_io_channel_new_file (enc->multipass_cache_file,
                                   (enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS ? "w" : "r"), &err);

        if (enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS)
            enc->multipass_cache_adapter = gst_adapter_new ();

        if (!enc->multipass_cache_fd) {
            GST_ELEMENT_ERROR (enc, RESOURCE, OPEN_READ, (NULL),
                               ("Failed to open multipass cache file: %s", err->message));
            g_error_free (err);
            return FALSE;
        }

        g_io_channel_set_encoding (enc->multipass_cache_fd, NULL, NULL);
    }

    return TRUE;
}
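theora_enc_start() opens the multipass cache file and, for the second pass, a GstAdapter; theora_enc_clear_multipass_cache(), which several of these functions call, is the counterpart that releases them. Its implementation is not part of this excerpt; a plausible sketch is:

/* Sketch of a cleanup counterpart to the setup in theora_enc_start():
 * close the cache channel and drop the adapter if they were created. */
static void
theora_enc_clear_multipass_cache (GstTheoraEnc * enc)
{
  if (enc->multipass_cache_fd) {
    g_io_channel_shutdown (enc->multipass_cache_fd, TRUE, NULL);
    g_io_channel_unref (enc->multipass_cache_fd);
    enc->multipass_cache_fd = NULL;
  }

  if (enc->multipass_cache_adapter) {
    g_object_unref (enc->multipass_cache_adapter);
    enc->multipass_cache_adapter = NULL;
  }
}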
static gboolean
theora_enc_stop (GstVideoEncoder * benc)
{
  GstTheoraEnc *enc;

  GST_DEBUG_OBJECT (benc, "stop: clearing theora state");
  enc = GST_THEORA_ENC (benc);

  if (enc->encoder)
    th_encode_free (enc->encoder);
  enc->encoder = NULL;
  th_comment_clear (&enc->comment);
  th_info_clear (&enc->info);

  if (enc->input_state)
    gst_video_codec_state_unref (enc->input_state);
  enc->input_state = NULL;

  /* Everything else is handled in reset() */
  theora_enc_clear_multipass_cache (enc);

  return TRUE;
}
static gboolean
theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  GstTheoraEnc *enc = GST_THEORA_ENC (gst_pad_get_parent (pad));
  const GValue *par;
  gint fps_n, fps_d;

  gst_structure_get_int (structure, "width", &enc->width);
  gst_structure_get_int (structure, "height", &enc->height);
  gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
  par = gst_structure_get_value (structure, "pixel-aspect-ratio");

  theora_info_clear (&enc->info);
  theora_info_init (&enc->info);
  /* Theora has a divisible-by-sixteen restriction for the encoded video size but
   * we can define a visible area using the frame_width/frame_height */
  enc->info_width = enc->info.width = (enc->width + 15) & ~15;
  enc->info_height = enc->info.height = (enc->height + 15) & ~15;
  enc->info.frame_width = enc->width;
  enc->info.frame_height = enc->height;

  /* center image if needed */
  if (enc->center) {
    /* make sure offset is even, for easier decoding */
    enc->offset_x = GST_ROUND_UP_2 ((enc->info_width - enc->width) / 2);
    enc->offset_y = GST_ROUND_UP_2 ((enc->info_height - enc->height) / 2);
  } else {
    enc->offset_x = 0;
    enc->offset_y = 0;
  }
  enc->info.offset_x = enc->offset_x;
  enc->info.offset_y = enc->offset_y;

  enc->info.fps_numerator = enc->fps_n = fps_n;
  enc->info.fps_denominator = enc->fps_d = fps_d;
  if (par) {
    enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
    enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
  } else {
    /* setting them to 0 indicates that the decoder can choose a good aspect
     * ratio, defaulting to 1/1 */
    enc->info.aspect_numerator = 0;
    enc->info.aspect_denominator = 0;
  }

  enc->info.colorspace = OC_CS_UNSPECIFIED;
  enc->info.target_bitrate = enc->video_bitrate;
  enc->info.quality = enc->video_quality;

  enc->info.dropframes_p = 0;
  enc->info.quick_p = (enc->quick ? 1 : 0);
  enc->info.keyframe_auto_p = (enc->keyframe_auto ? 1 : 0);
  enc->info.keyframe_frequency = enc->keyframe_freq;
  enc->info.keyframe_frequency_force = enc->keyframe_force;
  enc->info.keyframe_data_target_bitrate = enc->video_bitrate * 1.5;
  enc->info.keyframe_auto_threshold = enc->keyframe_threshold;
  enc->info.keyframe_mindistance = enc->keyframe_mindistance;
  enc->info.noise_sensitivity = enc->noise_sensitivity;
  enc->info.sharpness = enc->sharpness;

  /* as done in theora */
  enc->granule_shift = _ilog (enc->info.keyframe_frequency_force - 1);
  GST_DEBUG_OBJECT (enc,
      "keyframe_frequency_force is %d, granule shift is %d",
      enc->info.keyframe_frequency_force, enc->granule_shift);

  theora_enc_reset (enc);
  enc->initialised = TRUE;

  gst_object_unref (enc);

  return TRUE;
}
Example #14
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;
  gboolean force_keyframe;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
  if ((gint64) running_time < 0) {
    GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    gst_buffer_unref (buffer);
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK (enc);
  if (enc->bitrate_changed) {
    long int bitrate = enc->video_bitrate;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
        sizeof (long int));
    enc->bitrate_changed = FALSE;
  }

  if (enc->quality_changed) {
    long int quality = enc->video_quality;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
        sizeof (long int));
    enc->quality_changed = FALSE;
  }

  /* see if we need to schedule a keyframe */
  force_keyframe = enc->force_keyframe;
  enc->force_keyframe = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (force_keyframe) {
    GstClockTime stream_time;
    GstStructure *s;

    stream_time = gst_segment_to_stream_time (&enc->segment,
        GST_FORMAT_TIME, timestamp);

    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    theora_enc_force_keyframe (enc);

    gst_pad_push_event (enc->srcpad,
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf;
    GSList *buffers = NULL;
    int result;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* create the remaining theora headers */
    th_comment_clear (&enc->comment);
    th_comment_init (&enc->comment);

    while ((result =
            th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
      ret =
          theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
      if (ret != GST_FLOW_OK) {
        goto header_buffer_alloc;
      }
      buffers = g_slist_prepend (buffers, buf);
    }
    if (result < 0) {
      g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
      g_slist_free (buffers);
      goto encoder_disabled;
    }

    buffers = g_slist_reverse (buffers);

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buffers);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    while (buffers) {
      buf = buffers->data;
      buffers = g_slist_delete_link (buffers, buffers);
      if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
        g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
        g_slist_free (buffers);
        goto header_push;
      }
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    th_ycbcr_buffer ycbcr;
    gint res;

    theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
      if (!theora_enc_read_multipass_cache (enc)) {
        ret = GST_FLOW_ERROR;
        goto multipass_read_failed;
      }
    }

    res = th_encode_ycbcr_in (enc->encoder, ycbcr);
    /* none of the failure cases can happen here */
    g_assert (res == 0);

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
      if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
        ret = GST_FLOW_ERROR;
        goto multipass_write_failed;
      }
    }

    ret = GST_FLOW_OK;
    while (th_encode_packetout (enc->encoder, 0, &op)) {
      GstClockTime next_time;

      next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
multipass_read_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
multipass_write_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
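theora_enc_chain() hands raw frame data to libtheora via theora_enc_init_buffer(), which is not included in these examples. For the 4:2:0 case, describing a packed I420 frame to the encoder could look roughly like the sketch below (the real helper also covers 4:2:2 and 4:4:4, and the name fill_ycbcr_i420 is illustrative):

/* Sketch: describe a packed I420 frame to libtheora. Assumes plane strides
 * equal the 16-aligned frame_width/frame_height stored in the th_info. */
static void
fill_ycbcr_i420 (th_ycbcr_buffer ycbcr, const th_info * info, guint8 * data)
{
  ycbcr[0].width = info->frame_width;
  ycbcr[0].height = info->frame_height;
  ycbcr[0].stride = info->frame_width;
  ycbcr[0].data = data;

  ycbcr[1].width = info->frame_width / 2;
  ycbcr[1].height = info->frame_height / 2;
  ycbcr[1].stride = info->frame_width / 2;
  ycbcr[1].data = data + info->frame_width * info->frame_height;

  ycbcr[2].width = ycbcr[1].width;
  ycbcr[2].height = ycbcr[1].height;
  ycbcr[2].stride = ycbcr[1].stride;
  ycbcr[2].data = ycbcr[1].data + ycbcr[1].stride * ycbcr[1].height;
}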
Example #15
static GstStateChangeReturn
theora_enc_change_state (GstElement * element, GstStateChange transition)
{
  GstTheoraEnc *enc;
  GstStateChangeReturn ret;

  enc = GST_THEORA_ENC (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_DEBUG_OBJECT (enc, "READY->PAUSED Initing theora state");
      th_info_init (&enc->info);
      th_comment_init (&enc->comment);
      enc->packetno = 0;
      enc->force_keyframe = FALSE;

      if (enc->multipass_mode >= MULTIPASS_MODE_FIRST_PASS) {
        GError *err = NULL;

        if (!enc->multipass_cache_file) {
          ret = GST_STATE_CHANGE_FAILURE;
          GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
          return ret;
        }
        enc->multipass_cache_fd =
            g_io_channel_new_file (enc->multipass_cache_file,
            (enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS ? "w" : "r"),
            &err);

        if (enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS)
          enc->multipass_cache_adapter = gst_adapter_new ();

        if (!enc->multipass_cache_fd) {
          ret = GST_STATE_CHANGE_FAILURE;
          GST_ELEMENT_ERROR (enc, RESOURCE, OPEN_READ, (NULL),
              ("Failed to open multipass cache file: %s", err->message));
          g_error_free (err);
          return ret;
        }

        g_io_channel_set_encoding (enc->multipass_cache_fd, NULL, NULL);
      }
      break;
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
      break;
    default:
      break;
  }

  ret = parent_class->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
      break;
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      GST_DEBUG_OBJECT (enc, "PAUSED->READY Clearing theora state");
      if (enc->encoder) {
        th_encode_free (enc->encoder);
        enc->encoder = NULL;
      }
      th_comment_clear (&enc->comment);
      th_info_clear (&enc->info);

      theora_enc_clear (enc);
      enc->initialised = FALSE;
      break;
    case GST_STATE_CHANGE_READY_TO_NULL:
      break;
    default:
      break;
  }

  return ret;
}
Example #16
static gboolean
theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  GstTheoraEnc *enc = GST_THEORA_ENC (gst_pad_get_parent (pad));
  guint32 fourcc;
  const GValue *par;
  gint fps_n, fps_d;

  gst_structure_get_fourcc (structure, "format", &fourcc);
  gst_structure_get_int (structure, "width", &enc->width);
  gst_structure_get_int (structure, "height", &enc->height);
  gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
  par = gst_structure_get_value (structure, "pixel-aspect-ratio");

  th_info_clear (&enc->info);
  th_info_init (&enc->info);
  /* Theora has a divisible-by-sixteen restriction for the encoded video size but
   * we can define a picture area using pic_width/pic_height */
  enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
  enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
  enc->info.pic_width = enc->width;
  enc->info.pic_height = enc->height;
  switch (fourcc) {
    case GST_MAKE_FOURCC ('I', '4', '2', '0'):
      enc->info.pixel_fmt = TH_PF_420;
      break;
    case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
      enc->info.pixel_fmt = TH_PF_422;
      break;
    case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
      enc->info.pixel_fmt = TH_PF_444;
      break;
    default:
      g_assert_not_reached ();
  }

  enc->info.fps_numerator = enc->fps_n = fps_n;
  enc->info.fps_denominator = enc->fps_d = fps_d;
  if (par) {
    enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
    enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
  } else {
    /* setting them to 0 indicates that the decoder can choose a good aspect
     * ratio, defaulting to 1/1 */
    enc->info.aspect_numerator = 0;
    enc->info.aspect_denominator = 0;
  }

  enc->info.colorspace = TH_CS_UNSPECIFIED;

  /* as done in theora */
  enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
  GST_DEBUG_OBJECT (enc,
      "keyframe_frequency_force is %d, granule shift is %d",
      enc->keyframe_force, enc->info.keyframe_granule_shift);

  theora_enc_reset (enc);
  enc->initialised = TRUE;

  gst_object_unref (enc);

  return TRUE;
}
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
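theora_enc_handle_frame() resets its timestamp bookkeeping through theora_enc_reset_ts(), which is not shown in this excerpt. Judging from the equivalent inline code in the 0.10 chain function and the pfn_offset used by theora_enc_pre_push(), a plausible sketch is:

/* Sketch, mirroring the inline bookkeeping of the 0.10 chain function:
 * anchor granulepos/timestamps at the current running time and remember
 * the presentation frame number of that anchor point. */
static void
theora_enc_reset_ts (GstTheoraEnc * enc, GstClockTime running_time, gint pfn)
{
  enc->granulepos_offset =
      gst_util_uint64_scale (running_time, enc->fps_n, GST_SECOND * enc->fps_d);
  enc->timestamp_offset = running_time;
  enc->pfn_offset = pfn;
}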
Example #18
static gboolean
theora_enc_sink_event (GstPad * pad, GstEvent * event)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  gboolean res;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      gdouble rate, applied_rate;
      GstFormat format;
      gint64 start, stop, time;

      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &time);

      gst_segment_set_newsegment_full (&enc->segment, update, rate,
          applied_rate, format, start, stop, time);

      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    case GST_EVENT_EOS:
      if (enc->initialised) {
        /* push last packet with eos flag, should not be called */
        while (th_encode_packetout (enc->encoder, 1, &op)) {
          GstClockTime next_time =
              th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

          theora_push_packet (enc, &op, GST_CLOCK_TIME_NONE, enc->next_ts,
              next_time - enc->next_ts);
          enc->next_ts = next_time;
        }
      }
      if (enc->initialised && enc->multipass_cache_fd
          && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS)
        theora_enc_write_multipass_cache (enc, TRUE, TRUE);

      theora_enc_clear_multipass_cache (enc);

      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      const GstStructure *s;

      s = gst_event_get_structure (event);

      if (gst_structure_has_name (s, "GstForceKeyUnit"))
        theora_enc_force_keyframe (enc);
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (enc->srcpad, event);
      break;
  }
  return res;
}
static void
theora_enc_set_property (GObject * object, guint prop_id,
                         const GValue * value, GParamSpec * pspec)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (object);

    switch (prop_id) {
    case PROP_BITRATE:
        GST_OBJECT_LOCK (enc);
        enc->video_bitrate = g_value_get_int (value) * 1000;
        enc->video_quality = 0;
        enc->bitrate_changed = TRUE;
        GST_OBJECT_UNLOCK (enc);
        break;
    case PROP_QUALITY:
        GST_OBJECT_LOCK (enc);
        if (GST_STATE (enc) >= GST_STATE_PAUSED && enc->video_quality == 0) {
            GST_WARNING_OBJECT (object, "Can't change from bitrate to quality mode"
                                " while playing");
        } else {
            enc->video_quality = g_value_get_int (value);
            enc->video_bitrate = 0;
            enc->quality_changed = TRUE;
        }
        GST_OBJECT_UNLOCK (enc);
        break;
    case PROP_KEYFRAME_AUTO:
        enc->keyframe_auto = g_value_get_boolean (value);
        break;
    case PROP_KEYFRAME_FREQ:
        enc->keyframe_freq = g_value_get_int (value);
        break;
    case PROP_KEYFRAME_FREQ_FORCE:
        enc->keyframe_force = g_value_get_int (value);
        break;
    case PROP_SPEEDLEVEL:
        enc->speed_level = g_value_get_int (value);
        break;
    case PROP_VP3_COMPATIBLE:
        enc->vp3_compatible = g_value_get_boolean (value);
        break;
    case PROP_DROP_FRAMES:
        enc->drop_frames = g_value_get_boolean (value);
        break;
    case PROP_CAP_OVERFLOW:
        enc->cap_overflow = g_value_get_boolean (value);
        break;
    case PROP_CAP_UNDERFLOW:
        enc->cap_underflow = g_value_get_boolean (value);
        break;
    case PROP_RATE_BUFFER:
        enc->rate_buffer = g_value_get_int (value);
        break;
    case PROP_MULTIPASS_CACHE_FILE:
        g_free (enc->multipass_cache_file);
        enc->multipass_cache_file = g_value_dup_string (value);
        break;
    case PROP_MULTIPASS_MODE:
        enc->multipass_mode = g_value_get_enum (value);
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
        break;
    }
}
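Applications drive these setters through regular GObject properties. A minimal usage sketch follows; the factory and property names ("theoraenc", "bitrate", "keyframe-auto", "keyframe-force") are assumed from the PROP_* ids above and are not confirmed by this excerpt:

/* Illustrative configuration of a theoraenc instance; property names are
 * assumed to match the PROP_* ids handled in theora_enc_set_property(). */
static GstElement *
make_configured_theoraenc (void)
{
  GstElement *enc = gst_element_factory_make ("theoraenc", NULL);

  g_object_set (enc,
      "bitrate", 800,           /* kbit/s, stored internally in bit/s */
      "keyframe-auto", TRUE,
      "keyframe-force", 64,
      NULL);

  return enc;
}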
static gboolean
theora_enc_sink_event (GstPad * pad, GstEvent * event)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  gboolean res;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      gdouble rate, applied_rate;
      GstFormat format;
      gint64 start, stop, time;

      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &time);

      gst_segment_set_newsegment_full (&enc->segment, update, rate,
          applied_rate, format, start, stop, time);

      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    case GST_EVENT_EOS:
      if (enc->initialised) {
        /* push last packet with eos flag, should not be called */
        while (theora_encode_packetout (&enc->state, 1, &op)) {
          GstClockTime next_time =
              theora_enc_get_ogg_packet_end_time (enc, &op);

          theora_push_packet (enc, &op, GST_CLOCK_TIME_NONE, enc->next_ts,
              next_time - enc->next_ts);
          enc->next_ts = next_time;
        }
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      const GstStructure *s;

      s = gst_event_get_structure (event);

      if (gst_structure_has_name (s, "GstForceKeyUnit")) {
        GstClockTime next_ts;

        /* make sure timestamps increment after resetting the decoder */
        next_ts = enc->next_ts + enc->timestamp_offset;

        theora_enc_reset (enc);
        enc->granulepos_offset =
            gst_util_uint64_scale (next_ts, enc->fps_n,
            GST_SECOND * enc->fps_d);
        enc->timestamp_offset = next_ts;
        enc->next_ts = 0;
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (enc->srcpad, event);
      break;
  }
  return res;
}
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf1, *buf2, *buf3;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* first packet will get its own page automatically */
    if (theora_encode_header (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf1);
    if (ret != GST_FLOW_OK) {
      goto header_buffer_alloc;
    }

    /* create the remaining theora headers */
    theora_comment_clear (&enc->comment);
    theora_comment_init (&enc->comment);

    if (theora_encode_comment (&enc->comment, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf2);
    /* Theora expects us to put this packet buffer into an ogg page,
     * in which case it becomes the ogg library's responsibility to
     * free it. Since we're copying and outputting a gst_buffer,
     * we need to free it ourselves. */
    if (op.packet)
      free (op.packet);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      goto header_buffer_alloc;
    }

    if (theora_encode_tables (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf3);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      gst_buffer_unref (buf2);
      goto header_buffer_alloc;
    }

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buf1, buf2, buf3);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_buffer_set_caps (buf3, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    if ((ret = theora_push_buffer (enc, buf1)) != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf2)) != GST_FLOW_OK) {
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf3)) != GST_FLOW_OK) {
      goto header_push;
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    yuv_buffer yuv;
    gint res;
    gint y_size;
    guint8 *pixels;

    yuv.y_width = enc->info_width;
    yuv.y_height = enc->info_height;
    yuv.y_stride = enc->info_width;

    yuv.uv_width = enc->info_width / 2;
    yuv.uv_height = enc->info_height / 2;
    yuv.uv_stride = yuv.uv_width;

    y_size = enc->info_width * enc->info_height;

    if (enc->width == enc->info_width && enc->height == enc->info_height) {
      GST_LOG_OBJECT (enc, "no cropping/conversion needed");
      /* easy case, no cropping/conversion needed */
      pixels = GST_BUFFER_DATA (buffer);

      yuv.y = pixels;
      yuv.u = yuv.y + y_size;
      yuv.v = yuv.u + y_size / 4;
    } else {
      GstBuffer *newbuf;
      gint i;
      guchar *dest_y, *src_y;
      guchar *dest_u, *src_u;
      guchar *dest_v, *src_v;
      gint src_y_stride, src_uv_stride;
      gint dst_y_stride, dst_uv_stride;
      gint width, height;
      gint cwidth, cheight;
      gint offset_x, right_x, right_border;

      GST_LOG_OBJECT (enc, "cropping/conversion needed for strides");
      /* source width/height */
      width = enc->width;
      height = enc->height;
      /* source chroma width/height */
      cwidth = width / 2;
      cheight = height / 2;

      /* source strides as defined in videotestsrc */
      src_y_stride = GST_ROUND_UP_4 (width);
      src_uv_stride = GST_ROUND_UP_8 (width) / 2;

      /* destination strides from the real picture width */
      dst_y_stride = enc->info_width;
      dst_uv_stride = enc->info_width / 2;

      newbuf = gst_buffer_new_and_alloc (y_size * 3 / 2);
      if (!newbuf) {
        ret = GST_FLOW_ERROR;
        goto no_buffer;
      }
      GST_BUFFER_OFFSET (newbuf) = GST_BUFFER_OFFSET_NONE;
      gst_buffer_set_caps (newbuf, GST_PAD_CAPS (enc->srcpad));

      dest_y = yuv.y = GST_BUFFER_DATA (newbuf);
      dest_u = yuv.u = yuv.y + y_size;
      dest_v = yuv.v = yuv.u + y_size / 4;

      src_y = GST_BUFFER_DATA (buffer);
      src_u = src_y + src_y_stride * GST_ROUND_UP_2 (height);
      src_v = src_u + src_uv_stride * GST_ROUND_UP_2 (height) / 2;

      if (enc->border != BORDER_NONE) {
        /* fill top border */
        for (i = 0; i < enc->offset_y; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }
      } else {
        dest_y += dst_y_stride * enc->offset_y;
      }

      offset_x = enc->offset_x;
      right_x = width + enc->offset_x;
      right_border = dst_y_stride - right_x;

      /* copy Y plane */
      for (i = 0; i < height; i++) {
        memcpy (dest_y + offset_x, src_y, width);
        if (enc->border != BORDER_NONE) {
          memset (dest_y, 0, offset_x);
          memset (dest_y + right_x, 0, right_border);
        }

        dest_y += dst_y_stride;
        src_y += src_y_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = height + enc->offset_y; i < enc->info.height; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }

        /* fill top border chroma */
        for (i = 0; i < enc->offset_y / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      } else {
        dest_u += dst_uv_stride * enc->offset_y / 2;
        dest_v += dst_uv_stride * enc->offset_y / 2;
      }

      offset_x = enc->offset_x / 2;
      right_x = cwidth + offset_x;
      right_border = dst_uv_stride - right_x;

      /* copy UV planes */
      for (i = 0; i < cheight; i++) {
        memcpy (dest_v + offset_x, src_v, cwidth);
        memcpy (dest_u + offset_x, src_u, cwidth);

        if (enc->border != BORDER_NONE) {
          memset (dest_u, 128, offset_x);
          memset (dest_u + right_x, 128, right_border);
          memset (dest_v, 128, offset_x);
          memset (dest_v + right_x, 128, right_border);
        }

        dest_u += dst_uv_stride;
        dest_v += dst_uv_stride;
        src_u += src_uv_stride;
        src_v += src_uv_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = cheight + enc->offset_y / 2; i < enc->info_height / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      }

      gst_buffer_unref (buffer);
      buffer = newbuf;
    }

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    res = theora_encode_YUVin (&enc->state, &yuv);

    ret = GST_FLOW_OK;
    while (theora_encode_packetout (&enc->state, 0, &op)) {
      GstClockTime next_time;

      next_time = theora_enc_get_ogg_packet_end_time (enc, &op);

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
no_buffer:
  {
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
static gboolean
theora_enc_flush (GstVideoEncoder * encoder)
{
  GstTheoraEnc *enc = GST_THEORA_ENC (encoder);
  ogg_uint32_t keyframe_force;
  int rate_flags;


  if (enc->input_state == NULL) {
    GST_INFO_OBJECT (enc, "Not configured yet, returning FALSE");

    return FALSE;
  }

  GST_OBJECT_LOCK (enc);
  enc->info.target_bitrate = enc->video_bitrate;
  enc->info.quality = enc->video_quality;
  enc->bitrate_changed = FALSE;
  enc->quality_changed = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (enc->encoder)
    th_encode_free (enc->encoder);

  enc->encoder = th_encode_alloc (&enc->info);
  /* We ensure this function cannot fail. */
  g_assert (enc->encoder != NULL);
  th_encode_ctl (enc->encoder, TH_ENCCTL_SET_SPLEVEL, &enc->speed_level,
      sizeof (enc->speed_level));
  th_encode_ctl (enc->encoder, TH_ENCCTL_SET_VP3_COMPATIBLE,
      &enc->vp3_compatible, sizeof (enc->vp3_compatible));

  rate_flags = 0;
  if (enc->drop_frames)
    rate_flags |= TH_RATECTL_DROP_FRAMES;
  if (enc->cap_overflow)
    rate_flags |= TH_RATECTL_CAP_OVERFLOW;
  if (enc->cap_underflow)
    rate_flags |= TH_RATECTL_CAP_UNDERFLOW;
  th_encode_ctl (enc->encoder, TH_ENCCTL_SET_RATE_FLAGS,
      &rate_flags, sizeof (rate_flags));

  if (enc->rate_buffer) {
    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_RATE_BUFFER,
        &enc->rate_buffer, sizeof (enc->rate_buffer));
  } else {
    /* FIXME */
  }

  keyframe_force = enc->keyframe_auto ?
      enc->keyframe_force : enc->keyframe_freq;
  th_encode_ctl (enc->encoder, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
      &keyframe_force, sizeof (keyframe_force));

  /* Get placeholder data */
  if (enc->multipass_cache_fd
      && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS)
    theora_enc_write_multipass_cache (enc, TRUE, FALSE);

  return TRUE;
}