Example #1
static gboolean
theora_enc_set_format (GstVideoEncoder * benc, GstVideoCodecState * state)
{
    GstTheoraEnc *enc = GST_THEORA_ENC (benc);
    GstVideoInfo *info = &state->info;

    enc->width = GST_VIDEO_INFO_WIDTH (info);
    enc->height = GST_VIDEO_INFO_HEIGHT (info);

    th_info_clear (&enc->info);
    th_info_init (&enc->info);
    /* Theora has a divisible-by-sixteen restriction for the encoded video size but
     * we can define a picture area using pic_width/pic_height */
    enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
    enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
    enc->info.pic_width = enc->width;
    enc->info.pic_height = enc->height;
    switch (GST_VIDEO_INFO_FORMAT (info)) {
    case GST_VIDEO_FORMAT_I420:
        enc->info.pixel_fmt = TH_PF_420;
        break;
    case GST_VIDEO_FORMAT_Y42B:
        enc->info.pixel_fmt = TH_PF_422;
        break;
    case GST_VIDEO_FORMAT_Y444:
        enc->info.pixel_fmt = TH_PF_444;
        break;
    default:
        g_assert_not_reached ();
    }

    enc->info.fps_numerator = enc->fps_n = GST_VIDEO_INFO_FPS_N (info);
    enc->info.fps_denominator = enc->fps_d = GST_VIDEO_INFO_FPS_D (info);
    enc->info.aspect_numerator = GST_VIDEO_INFO_PAR_N (info);
    enc->info.aspect_denominator = GST_VIDEO_INFO_PAR_D (info);

    enc->info.colorspace = TH_CS_UNSPECIFIED;

    /* Save input state */
    if (enc->input_state)
        gst_video_codec_state_unref (enc->input_state);
    enc->input_state = gst_video_codec_state_ref (state);

    /* as done in theora */
    enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
    GST_DEBUG_OBJECT (enc,
                      "keyframe_frequency_force is %d, granule shift is %d",
                      enc->keyframe_force, enc->info.keyframe_granule_shift);

    theora_enc_reset (enc);
    enc->initialised = TRUE;

    return TRUE;
}
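
The granule-shift computation above relies on an _ilog helper that is not part of this listing. Below is a minimal sketch of the usual bit-counting implementation (the same idea is used inside libtheora); the actual helper in the plugin may differ in detail.

static int
_ilog (unsigned int v)
{
  int ret = 0;

  /* count the bits needed to represent v; _ilog (keyframe_force - 1)
   * is then the smallest granule shift whose frame counter can hold
   * keyframe_force - 1 frames between keyframes */
  while (v) {
    ret++;
    v >>= 1;
  }

  return ret;
}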
Example #2
static void
theora_enc_force_keyframe (GstTheoraEnc * enc)
{
  GstClockTime next_ts;

  /* make sure timestamps increment after resetting the encoder */
  next_ts = enc->next_ts + enc->timestamp_offset;

  theora_enc_reset (enc);
  enc->granulepos_offset =
      gst_util_uint64_scale (next_ts, enc->fps_n, GST_SECOND * enc->fps_d);
  enc->timestamp_offset = next_ts;
  enc->next_ts = 0;
}
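
The gst_util_uint64_scale() call maps the saved running time back onto a frame index, so granulepos counting resumes where it left off while timestamp_offset keeps the output timestamps monotonic. A tiny worked example with hypothetical numbers (30 fps, a next_ts of 2 seconds):

#include <gst/gst.h>

/* Illustrative numbers only; they are not taken from the plugin. */
static guint64
example_granulepos_offset (void)
{
  GstClockTime next_ts = 2 * GST_SECOND;  /* hypothetical running time */
  gint fps_n = 30, fps_d = 1;             /* hypothetical framerate */

  /* 2 s * 30 / (1 s * 1) == 60: granulepos restarts at frame 60 */
  return gst_util_uint64_scale (next_ts, fps_n, GST_SECOND * fps_d);
}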
Example #3
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf1, *buf2, *buf3;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* first packet will get its own page automatically */
    if (theora_encode_header (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf1);
    if (ret != GST_FLOW_OK) {
      goto header_buffer_alloc;
    }

    /* create the remaining theora headers */
    theora_comment_clear (&enc->comment);
    theora_comment_init (&enc->comment);

    if (theora_encode_comment (&enc->comment, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf2);
    /* Theora expects us to put this packet buffer into an ogg page,
     * in which case it becomes the ogg library's responsibility to
     * free it. Since we're copying and outputting a gst_buffer,
     * we need to free it ourselves. */
    if (op.packet)
      free (op.packet);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      goto header_buffer_alloc;
    }

    if (theora_encode_tables (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf3);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      gst_buffer_unref (buf2);
      goto header_buffer_alloc;
    }

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buf1, buf2, buf3);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_buffer_set_caps (buf3, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    if ((ret = theora_push_buffer (enc, buf1)) != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf2)) != GST_FLOW_OK) {
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf3)) != GST_FLOW_OK) {
      goto header_push;
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    yuv_buffer yuv;
    gint res;
    gint y_size;
    guint8 *pixels;

    yuv.y_width = enc->info_width;
    yuv.y_height = enc->info_height;
    yuv.y_stride = enc->info_width;

    yuv.uv_width = enc->info_width / 2;
    yuv.uv_height = enc->info_height / 2;
    yuv.uv_stride = yuv.uv_width;

    y_size = enc->info_width * enc->info_height;

    if (enc->width == enc->info_width && enc->height == enc->info_height) {
      GST_LOG_OBJECT (enc, "no cropping/conversion needed");
      /* easy case, no cropping/conversion needed */
      pixels = GST_BUFFER_DATA (buffer);

      yuv.y = pixels;
      yuv.u = yuv.y + y_size;
      yuv.v = yuv.u + y_size / 4;
    } else {
      GstBuffer *newbuf;
      gint i;
      guchar *dest_y, *src_y;
      guchar *dest_u, *src_u;
      guchar *dest_v, *src_v;
      gint src_y_stride, src_uv_stride;
      gint dst_y_stride, dst_uv_stride;
      gint width, height;
      gint cwidth, cheight;
      gint offset_x, right_x, right_border;

      GST_LOG_OBJECT (enc, "cropping/conversion needed for strides");
      /* source width/height */
      width = enc->width;
      height = enc->height;
      /* source chroma width/height */
      cwidth = width / 2;
      cheight = height / 2;

      /* source strides as defined in videotestsrc */
      src_y_stride = GST_ROUND_UP_4 (width);
      src_uv_stride = GST_ROUND_UP_8 (width) / 2;

      /* destination strides from the real picture width */
      dst_y_stride = enc->info_width;
      dst_uv_stride = enc->info_width / 2;

      newbuf = gst_buffer_new_and_alloc (y_size * 3 / 2);
      if (!newbuf) {
        ret = GST_FLOW_ERROR;
        goto no_buffer;
      }
      GST_BUFFER_OFFSET (newbuf) = GST_BUFFER_OFFSET_NONE;
      gst_buffer_set_caps (newbuf, GST_PAD_CAPS (enc->srcpad));

      dest_y = yuv.y = GST_BUFFER_DATA (newbuf);
      dest_u = yuv.u = yuv.y + y_size;
      dest_v = yuv.v = yuv.u + y_size / 4;

      src_y = GST_BUFFER_DATA (buffer);
      src_u = src_y + src_y_stride * GST_ROUND_UP_2 (height);
      src_v = src_u + src_uv_stride * GST_ROUND_UP_2 (height) / 2;

      if (enc->border != BORDER_NONE) {
        /* fill top border */
        for (i = 0; i < enc->offset_y; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }
      } else {
        dest_y += dst_y_stride * enc->offset_y;
      }

      offset_x = enc->offset_x;
      right_x = width + enc->offset_x;
      right_border = dst_y_stride - right_x;

      /* copy Y plane */
      for (i = 0; i < height; i++) {
        memcpy (dest_y + offset_x, src_y, width);
        if (enc->border != BORDER_NONE) {
          memset (dest_y, 0, offset_x);
          memset (dest_y + right_x, 0, right_border);
        }

        dest_y += dst_y_stride;
        src_y += src_y_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = height + enc->offset_y; i < enc->info.height; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }

        /* fill top border chroma */
        for (i = 0; i < enc->offset_y / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      } else {
        dest_u += dst_uv_stride * enc->offset_y / 2;
        dest_v += dst_uv_stride * enc->offset_y / 2;
      }

      offset_x = enc->offset_x / 2;
      right_x = cwidth + offset_x;
      right_border = dst_uv_stride - right_x;

      /* copy UV planes */
      for (i = 0; i < cheight; i++) {
        memcpy (dest_v + offset_x, src_v, cwidth);
        memcpy (dest_u + offset_x, src_u, cwidth);

        if (enc->border != BORDER_NONE) {
          memset (dest_u, 128, offset_x);
          memset (dest_u + right_x, 128, right_border);
          memset (dest_v, 128, offset_x);
          memset (dest_v + right_x, 128, right_border);
        }

        dest_u += dst_uv_stride;
        dest_v += dst_uv_stride;
        src_u += src_uv_stride;
        src_v += src_uv_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = cheight + enc->offset_y / 2; i < enc->info_height / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      }

      gst_buffer_unref (buffer);
      buffer = newbuf;
    }

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    res = theora_encode_YUVin (&enc->state, &yuv);

    ret = GST_FLOW_OK;
    while (theora_encode_packetout (&enc->state, 0, &op)) {
      GstClockTime next_time;

      next_time = theora_enc_get_ogg_packet_end_time (enc, &op);

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
no_buffer:
  {
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
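
The pointer setup in the easy case above (yuv.u = yuv.y + y_size, yuv.v = yuv.u + y_size / 4) assumes a tightly packed I420 frame: one full-resolution luma plane followed by two quarter-size chroma planes. A short sketch of that layout with hypothetical names, purely for reference:

#include <glib.h>

/* Sketch only: packed I420 plane layout as assumed by the "easy case" in
 * Example #3; the struct and function below are illustrative, not plugin code. */
typedef struct {
  guint8 *y, *u, *v;
} PackedI420;

static PackedI420
packed_i420_planes (guint8 * data, gint width, gint height)
{
  PackedI420 p;
  gint y_size = width * height;  /* full-resolution luma plane */

  p.y = data;
  p.u = p.y + y_size;            /* chroma is subsampled 2x2, so ...  */
  p.v = p.u + y_size / 4;        /* ... each chroma plane is y_size/4 */
  return p;
}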
Example #4
static gboolean
theora_enc_sink_event (GstPad * pad, GstEvent * event)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  gboolean res;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      gdouble rate, applied_rate;
      GstFormat format;
      gint64 start, stop, time;

      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &time);

      gst_segment_set_newsegment_full (&enc->segment, update, rate,
          applied_rate, format, start, stop, time);

      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    case GST_EVENT_EOS:
      if (enc->initialised) {
        /* push last packet with eos flag, should not be called */
        while (theora_encode_packetout (&enc->state, 1, &op)) {
          GstClockTime next_time =
              theora_enc_get_ogg_packet_end_time (enc, &op);

          theora_push_packet (enc, &op, GST_CLOCK_TIME_NONE, enc->next_ts,
              next_time - enc->next_ts);
          enc->next_ts = next_time;
        }
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      const GstStructure *s;

      s = gst_event_get_structure (event);

      if (gst_structure_has_name (s, "GstForceKeyUnit")) {
        GstClockTime next_ts;

        /* make sure timestamps increment after resetting the encoder */
        next_ts = enc->next_ts + enc->timestamp_offset;

        theora_enc_reset (enc);
        enc->granulepos_offset =
            gst_util_uint64_scale (next_ts, enc->fps_n,
            GST_SECOND * enc->fps_d);
        enc->timestamp_offset = next_ts;
        enc->next_ts = 0;
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (enc->srcpad, event);
      break;
  }
  return res;
}
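
The GstForceKeyUnit branch only matches on the structure name, so with the 0.10 API used in this example a keyframe can be requested by pushing a custom downstream event into the encoder's sink pad. A rough sketch; real applications would also fill in fields such as timestamp and running-time, which are omitted here:

#include <gst/gst.h>

/* Sketch only: ask theoraenc for a keyframe (GStreamer 0.10 API). */
static void
request_theora_keyframe (GstElement * theoraenc)
{
  GstPad *sinkpad = gst_element_get_static_pad (theoraenc, "sink");
  GstStructure *s = gst_structure_new ("GstForceKeyUnit", NULL);

  /* theora_enc_sink_event() above matches the structure name, resets the
   * encoder and forwards the event downstream */
  gst_pad_send_event (sinkpad,
      gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  gst_object_unref (sinkpad);
}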
Example #5
static gboolean
theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  GstTheoraEnc *enc = GST_THEORA_ENC (gst_pad_get_parent (pad));
  const GValue *par;
  gint fps_n, fps_d;

  gst_structure_get_int (structure, "width", &enc->width);
  gst_structure_get_int (structure, "height", &enc->height);
  gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
  par = gst_structure_get_value (structure, "pixel-aspect-ratio");

  theora_info_clear (&enc->info);
  theora_info_init (&enc->info);
  /* Theora has a divisible-by-sixteen restriction for the encoded video size but
   * we can define a visible area using the frame_width/frame_height */
  enc->info_width = enc->info.width = (enc->width + 15) & ~15;
  enc->info_height = enc->info.height = (enc->height + 15) & ~15;
  enc->info.frame_width = enc->width;
  enc->info.frame_height = enc->height;

  /* center image if needed */
  if (enc->center) {
    /* make sure offset is even, for easier decoding */
    enc->offset_x = GST_ROUND_UP_2 ((enc->info_width - enc->width) / 2);
    enc->offset_y = GST_ROUND_UP_2 ((enc->info_height - enc->height) / 2);
  } else {
    enc->offset_x = 0;
    enc->offset_y = 0;
  }
  enc->info.offset_x = enc->offset_x;
  enc->info.offset_y = enc->offset_y;

  enc->info.fps_numerator = enc->fps_n = fps_n;
  enc->info.fps_denominator = enc->fps_d = fps_d;
  if (par) {
    enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
    enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
  } else {
    /* setting them to 0 indicates that the decoder can choose a good aspect
     * ratio, defaulting to 1/1 */
    enc->info.aspect_numerator = 0;
    enc->info.aspect_denominator = 0;
  }

  enc->info.colorspace = OC_CS_UNSPECIFIED;
  enc->info.target_bitrate = enc->video_bitrate;
  enc->info.quality = enc->video_quality;

  enc->info.dropframes_p = 0;
  enc->info.quick_p = (enc->quick ? 1 : 0);
  enc->info.keyframe_auto_p = (enc->keyframe_auto ? 1 : 0);
  enc->info.keyframe_frequency = enc->keyframe_freq;
  enc->info.keyframe_frequency_force = enc->keyframe_force;
  enc->info.keyframe_data_target_bitrate = enc->video_bitrate * 1.5;
  enc->info.keyframe_auto_threshold = enc->keyframe_threshold;
  enc->info.keyframe_mindistance = enc->keyframe_mindistance;
  enc->info.noise_sensitivity = enc->noise_sensitivity;
  enc->info.sharpness = enc->sharpness;

  /* as done in theora */
  enc->granule_shift = _ilog (enc->info.keyframe_frequency_force - 1);
  GST_DEBUG_OBJECT (enc,
      "keyframe_frequency_force is %d, granule shift is %d",
      enc->info.keyframe_frequency_force, enc->granule_shift);

  theora_enc_reset (enc);
  enc->initialised = TRUE;

  gst_object_unref (enc);

  return TRUE;
}
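
The rounding and centering logic is easiest to follow with concrete numbers. A small sketch with a hypothetical 320x242 input:

#include <gst/gst.h>

/* Illustrative numbers only. */
static void
example_centering (void)
{
  gint width = 320, height = 242;           /* hypothetical input size */
  gint info_width = (width + 15) & ~15;     /* 320, already a multiple of 16 */
  gint info_height = (height + 15) & ~15;   /* 256 */
  gint offset_x = GST_ROUND_UP_2 ((info_width - width) / 2);    /* 0 */
  gint offset_y = GST_ROUND_UP_2 ((info_height - height) / 2);  /* 7 -> 8 */

  /* the 242-line picture sits 8 lines down in the 256-line encoded frame;
   * keeping the offsets even avoids half-pixel chroma shifts */
  g_print ("offsets %dx%d inside %dx%d\n", offset_x, offset_y,
      info_width, info_height);
}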
Example #6
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
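
theora_enc_reset_ts() is not part of this listing. Judging from the offset bookkeeping in Examples #2 and #3, a plausible sketch looks like the following; the pfn_offset field is an assumption for illustration and the real helper in the plugin may differ:

/* Sketch only: restart the granulepos/timestamp bookkeeping, following the
 * pattern of Examples #2 and #3.  pfn_offset is an assumed field name. */
static void
theora_enc_reset_ts (GstTheoraEnc * enc, GstClockTime running_time, gint pfn)
{
  enc->granulepos_offset =
      gst_util_uint64_scale (running_time, enc->fps_n,
      GST_SECOND * enc->fps_d);
  enc->timestamp_offset = running_time;
  enc->pfn_offset = pfn;  /* remember the frame number where counting restarted */
}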
Example #7
static gboolean
theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  GstTheoraEnc *enc = GST_THEORA_ENC (gst_pad_get_parent (pad));
  guint32 fourcc;
  const GValue *par;
  gint fps_n, fps_d;

  gst_structure_get_fourcc (structure, "format", &fourcc);
  gst_structure_get_int (structure, "width", &enc->width);
  gst_structure_get_int (structure, "height", &enc->height);
  gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
  par = gst_structure_get_value (structure, "pixel-aspect-ratio");

  th_info_clear (&enc->info);
  th_info_init (&enc->info);
  /* Theora has a divisible-by-sixteen restriction for the encoded video size but
   * we can define a picture area using pic_width/pic_height */
  enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
  enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
  enc->info.pic_width = enc->width;
  enc->info.pic_height = enc->height;
  switch (fourcc) {
    case GST_MAKE_FOURCC ('I', '4', '2', '0'):
      enc->info.pixel_fmt = TH_PF_420;
      break;
    case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
      enc->info.pixel_fmt = TH_PF_422;
      break;
    case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
      enc->info.pixel_fmt = TH_PF_444;
      break;
    default:
      g_assert_not_reached ();
  }

  enc->info.fps_numerator = enc->fps_n = fps_n;
  enc->info.fps_denominator = enc->fps_d = fps_d;
  if (par) {
    enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
    enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
  } else {
    /* setting them to 0 indicates that the decoder can choose a good aspect
     * ratio, defaulting to 1/1 */
    enc->info.aspect_numerator = 0;
    enc->info.aspect_denominator = 0;
  }

  enc->info.colorspace = TH_CS_UNSPECIFIED;

  /* as done in theora */
  enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
  GST_DEBUG_OBJECT (enc,
      "keyframe_frequency_force is %d, granule shift is %d",
      enc->keyframe_force, enc->info.keyframe_granule_shift);

  theora_enc_reset (enc);
  enc->initialised = TRUE;

  gst_object_unref (enc);

  return TRUE;
}
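
The default: g_assert_not_reached() branch in the switch above is only safe because caps negotiation can never hand the encoder a fourcc other than I420, Y42B or Y444. That is normally guaranteed by the element's static sink pad template; a sketch of what such a 0.10-style template looks like (the exact caps string in the plugin may differ):

/* Sketch only: a 0.10-style sink template restricting input formats so the
 * switch above can never reach its default branch. */
static GstStaticPadTemplate theora_enc_sink_factory =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/x-raw-yuv, "
        "format = (fourcc) { I420, Y42B, Y444 }, "
        "framerate = (fraction) [ 0/1, MAX ], "
        "width = (int) [ 1, MAX ], " "height = (int) [ 1, MAX ]")
    );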
Example #8
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;
  gboolean force_keyframe;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
  if ((gint64) running_time < 0) {
    GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    gst_buffer_unref (buffer);
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK (enc);
  if (enc->bitrate_changed) {
    long int bitrate = enc->video_bitrate;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
        sizeof (long int));
    enc->bitrate_changed = FALSE;
  }

  if (enc->quality_changed) {
    long int quality = enc->video_quality;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
        sizeof (long int));
    enc->quality_changed = FALSE;
  }

  /* see if we need to schedule a keyframe */
  force_keyframe = enc->force_keyframe;
  enc->force_keyframe = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (force_keyframe) {
    GstClockTime stream_time;
    GstStructure *s;

    stream_time = gst_segment_to_stream_time (&enc->segment,
        GST_FORMAT_TIME, timestamp);

    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    theora_enc_force_keyframe (enc);

    gst_pad_push_event (enc->srcpad,
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf;
    GSList *buffers = NULL;
    int result;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* create the remaining theora headers */
    th_comment_clear (&enc->comment);
    th_comment_init (&enc->comment);

    while ((result =
            th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
      ret =
          theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
      if (ret != GST_FLOW_OK) {
        goto header_buffer_alloc;
      }
      buffers = g_slist_prepend (buffers, buf);
    }
    if (result < 0) {
      g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
      g_slist_free (buffers);
      goto encoder_disabled;
    }

    buffers = g_slist_reverse (buffers);

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buffers);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    while (buffers) {
      buf = buffers->data;
      buffers = g_slist_delete_link (buffers, buffers);
      if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
        g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
        g_slist_free (buffers);
        goto header_push;
      }
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    th_ycbcr_buffer ycbcr;
    gint res;

    theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
      if (!theora_enc_read_multipass_cache (enc)) {
        ret = GST_FLOW_ERROR;
        goto multipass_read_failed;
      }
    }

    res = th_encode_ycbcr_in (enc->encoder, ycbcr);
    /* none of the failure cases can happen here */
    g_assert (res == 0);

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
      if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
        ret = GST_FLOW_ERROR;
        goto multipass_write_failed;
      }
    }

    ret = GST_FLOW_OK;
    while (th_encode_packetout (enc->encoder, 0, &op)) {
      GstClockTime next_time;

      next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
multipass_read_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
multipass_write_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
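
theora_enc_is_discontinuous() is also outside this listing. Its role here is to detect a jump in running time large enough that the granulepos counting has to restart. A heavily simplified sketch of one way to implement such a check; the expected_ts field and the half-frame tolerance are assumptions, not the plugin's actual values:

/* Sketch only: detect a gap in running time.  expected_ts and the
 * half-frame tolerance are assumptions made for illustration. */
static gboolean
theora_enc_is_discontinuous (GstTheoraEnc * enc, GstClockTime timestamp,
    GstClockTime duration)
{
  gboolean ret = FALSE;
  GstClockTime max_diff;

  /* allow the timestamp to drift by up to half a frame duration */
  max_diff =
      gst_util_uint64_scale_int (GST_SECOND, enc->fps_d, enc->fps_n) / 2;

  if (GST_CLOCK_TIME_IS_VALID (timestamp)
      && GST_CLOCK_TIME_IS_VALID (enc->expected_ts)) {
    if (timestamp < enc->expected_ts
        || timestamp - enc->expected_ts > max_diff)
      ret = TRUE;
  }

  /* remember where the next buffer is expected to start */
  if (GST_CLOCK_TIME_IS_VALID (timestamp) && GST_CLOCK_TIME_IS_VALID (duration))
    enc->expected_ts = timestamp + duration;
  else
    enc->expected_ts = GST_CLOCK_TIME_NONE;

  return ret;
}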