コード例 #1
0
ファイル: gstcvlaplace.c プロジェクト: kittee/gst-plugins-bad
static void
gst_cv_laplace_init (GstCvLaplace * filter)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Start from the default aperture size for the Laplace operator. */
  filter->aperture_size = DEFAULT_APERTURE_SIZE;

  /* Output goes to a separate buffer; do not transform in place. */
  gst_base_transform_set_in_place (trans, FALSE);
}
コード例 #2
0
static void
gst_alpha_color_init (GstAlphaColor * alpha)
{
  /* Configure the base transform to operate directly on the input buffer. */
  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (alpha), TRUE);
}
コード例 #3
0
void
gst_opencv_video_filter_set_in_place (GstOpencvVideoFilter * transform,
    gboolean ip)
{
  GstBaseTransform *base = GST_BASE_TRANSFORM (transform);

  /* Record the requested mode locally and forward it to the base class. */
  transform->in_place = ip;
  gst_base_transform_set_in_place (base, ip);
}
コード例 #4
0
ファイル: gstnavseek.c プロジェクト: spunktsch/svtplayer
/* Query the current position from the sink-pad peer and, if the peer
 * reports a TIME position, send it a flushing, accurate seek to
 * (position + offset), clamped to 0.
 *
 * Fix: gst_pad_get_peer() returns NULL when the sink pad is unlinked;
 * the original passed that NULL straight into gst_pad_query_position()
 * and gst_object_unref(). Bail out early instead. */
static void
gst_navseek_seek (GstNavSeek * navseek, gint64 offset)
{
  GstFormat peer_format = GST_FORMAT_TIME;
  gboolean ret;
  GstPad *peer_pad;
  gint64 peer_value;

  /* Query for the current time then attempt to set to time + offset */
  peer_pad = gst_pad_get_peer (GST_BASE_TRANSFORM (navseek)->sinkpad);
  if (peer_pad == NULL)
    return;

  ret = gst_pad_query_position (peer_pad, &peer_format, &peer_value);

  if (ret && peer_format == GST_FORMAT_TIME) {
    GstEvent *event;

    peer_value += offset;
    if (peer_value < 0)
      peer_value = 0;

    /* Absolute seek to the new target; the stop position is left alone. */
    event = gst_event_new_seek (1.0, GST_FORMAT_TIME,
        GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
        GST_SEEK_TYPE_SET, peer_value, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);

    gst_pad_send_event (peer_pad, event);
  }

  gst_object_unref (peer_pad);
}
コード例 #5
0
static void
gst_video_filter2_init (GstVideoFilter2 * videofilter2,
    GstVideoFilter2Class * videofilter2_class)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (videofilter2);

  /* Let the base transform apply QoS (e.g. frame dropping) by default. */
  gst_base_transform_set_qos_enabled (trans, TRUE);
}
コード例 #6
0
/* Pad query handler: answers LATENCY queries by forwarding them upstream
 * and adding this resampler's own delay on top; all other query types are
 * handled by the default pad query. */
static gboolean
audioresample_query (GstPad * pad, GstQuery * query)
{
  /* gst_pad_get_parent() takes a reference; released at the end. */
  GstAudioresample *audioresample =
      GST_AUDIORESAMPLE (gst_pad_get_parent (pad));
  GstBaseTransform *trans = GST_BASE_TRANSFORM (audioresample);
  gboolean res = TRUE;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_LATENCY:
    {
      GstClockTime min, max;
      gboolean live;
      guint64 latency;
      GstPad *peer;
      gint rate = audioresample->i_rate;
      /* Half the filter length in samples; the resampler's group delay. */
      gint resampler_latency = audioresample->filter_length / 2;

      /* In passthrough mode the element adds no delay of its own. */
      if (gst_base_transform_is_passthrough (trans))
        resampler_latency = 0;

      if ((peer = gst_pad_get_peer (trans->sinkpad))) {
        if ((res = gst_pad_query (peer, query))) {
          gst_query_parse_latency (query, &live, &min, &max);

          GST_DEBUG ("Peer latency: min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          /* add our own latency */
          if (rate != 0 && resampler_latency != 0)
            latency =
                gst_util_uint64_scale (resampler_latency, GST_SECOND, rate);
          else
            latency = 0;

          GST_DEBUG ("Our latency: %" GST_TIME_FORMAT, GST_TIME_ARGS (latency));

          /* max may be unbounded (GST_CLOCK_TIME_NONE); leave it so. */
          min += latency;
          if (max != GST_CLOCK_TIME_NONE)
            max += latency;

          GST_DEBUG ("Calculated total latency : min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          gst_query_set_latency (query, live, min, max);
        }
        gst_object_unref (peer);
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
  /* Drop the reference taken by gst_pad_get_parent() above. */
  gst_object_unref (audioresample);
  return res;
}
コード例 #7
0
/* GObject property setter for GstCapsSetter.
 *
 * PROP_CAPS copies (or defaults to ANY) the supplied caps, rejects any set
 * containing unfixed structures, stores the accepted caps under the object
 * lock, and asks the base transform to renegotiate. PROP_JOIN/PROP_REPLACE
 * store plain booleans. */
static void
gst_caps_setter_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstCapsSetter *filter;

  g_return_if_fail (GST_IS_CAPS_SETTER (object));
  filter = GST_CAPS_SETTER (object);

  switch (prop_id) {
    case PROP_CAPS:{
      GstCaps *new_caps;
      const GstCaps *new_caps_val = gst_value_get_caps (value);
      gint i;

      /* NULL means "no restriction": fall back to ANY caps. */
      if (new_caps_val == NULL) {
        new_caps = gst_caps_new_any ();
      } else {
        new_caps = gst_caps_copy (new_caps_val);
      }

      /* Every structure must be fully fixed; otherwise drop the whole set. */
      for (i = 0; new_caps && (i < gst_caps_get_size (new_caps)); ++i) {
        GstStructure *s;

        s = gst_caps_get_structure (new_caps, i);
        if (!gst_structure_foreach (s, gst_caps_is_fixed_foreach, NULL)) {
          GST_ERROR_OBJECT (filter, "rejected unfixed caps: %" GST_PTR_FORMAT,
              new_caps);
          gst_caps_unref (new_caps);
          new_caps = NULL;
          break;
        }
      }

      if (new_caps) {
        GST_OBJECT_LOCK (filter);
        /* gst_caps_replace() takes its own ref on new_caps... */
        gst_caps_replace (&filter->caps, new_caps);
        /* drop extra ref */
        gst_caps_unref (new_caps);
        GST_OBJECT_UNLOCK (filter);

        /* new_caps is still valid here: filter->caps holds a reference. */
        GST_DEBUG_OBJECT (filter, "set new caps %" GST_PTR_FORMAT, new_caps);
      }

      /* try to activate these new caps next time around */
      gst_base_transform_reconfigure (GST_BASE_TRANSFORM (filter));
      break;
    }
    case PROP_JOIN:
      filter->join = g_value_get_boolean (value);
      break;
    case PROP_REPLACE:
      filter->replace = g_value_get_boolean (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
コード例 #8
0
/* Video filter callback: keeps a ring of the last `planes` input buffers
 * and fills each output pixel from the same position in a randomly chosen
 * buffered frame (falling back to the current input when that slot is
 * still empty). */
static GstFlowReturn
gst_quarktv_transform_frame (GstVideoFilter * vfilter, GstVideoFrame * in_frame,
                             GstVideoFrame * out_frame)
{
    GstQuarkTV *filter = GST_QUARKTV (vfilter);
    gint area;
    guint32 *src, *dest;
    GstClockTime timestamp;
    GstBuffer **planetable;
    gint planes, current_plane;

    /* Map the buffer timestamp into stream time for controller sync. */
    timestamp = GST_BUFFER_TIMESTAMP (in_frame->buffer);
    timestamp =
        gst_segment_to_stream_time (&GST_BASE_TRANSFORM (vfilter)->segment,
                                    GST_FORMAT_TIME, timestamp);

    GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
                      GST_TIME_ARGS (timestamp));

    if (GST_CLOCK_TIME_IS_VALID (timestamp))
        gst_object_sync_values (GST_OBJECT (filter), timestamp);

    /* The plane table is torn down on shutdown; treat its absence as
     * flushing rather than an error. */
    if (G_UNLIKELY (filter->planetable == NULL))
        return GST_FLOW_FLUSHING;

    src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
    dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);

    /* Snapshot state under the object lock; the lock is held for the whole
     * pixel loop so the table cannot change underneath us. */
    GST_OBJECT_LOCK (filter);
    area = filter->area;
    planetable = filter->planetable;
    planes = filter->planes;
    current_plane = filter->current_plane;

    /* Replace the oldest slot with a ref to the current input buffer. */
    if (planetable[current_plane])
        gst_buffer_unref (planetable[current_plane]);
    planetable[current_plane] = gst_buffer_ref (in_frame->buffer);

    /* For each pixel */
    /* NOTE(review): the pre-decrement means pixel index 0 is never written
     * to dest — looks like an off-by-one inherited from upstream; confirm
     * whether that is intentional before changing it. */
    while (--area) {
        GstBuffer *rand;

        /* pick a random buffer */
        rand = planetable[(current_plane + (fastrand () >> 24)) % planes];

        /* Copy the pixel from the random buffer to dest, FIXME, slow */
        if (rand)
            gst_buffer_extract (rand, area * 4, &dest[area], 4);
        else
            dest[area] = src[area];
    }

    /* Advance the ring backwards, wrapping at 0. */
    filter->current_plane--;
    if (filter->current_plane < 0)
        filter->current_plane = planes - 1;
    GST_OBJECT_UNLOCK (filter);

    return GST_FLOW_OK;
}
コード例 #9
0
static gboolean
gst_pixbufscale_set_info (GstVideoFilter * filter, GstCaps * in,
    GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info)
{
  gboolean same_size;

  /* No scaling needed when input and output dimensions match: go
   * passthrough; otherwise make sure passthrough is off. */
  same_size = (in_info->width == out_info->width
      && in_info->height == out_info->height);
  gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (filter), same_size);

  GST_DEBUG_OBJECT (filter, "from=%dx%d, size %" G_GSIZE_FORMAT
      " -> to=%dx%d, size %" G_GSIZE_FORMAT,
      in_info->width, in_info->height, in_info->size,
      out_info->width, out_info->height, out_info->size);

  return TRUE;
}
コード例 #10
0
ファイル: gstcapsfilter.c プロジェクト: cablelabs/gstreamer
static void
gst_capsfilter_init (GstCapsFilter * filter)
{
  /* Handle GAP buffers ourselves and never prefer passthrough mode. */
  gst_base_transform_set_gap_aware (GST_BASE_TRANSFORM (filter), TRUE);
  gst_base_transform_set_prefer_passthrough (GST_BASE_TRANSFORM (filter),
      FALSE);

  /* Start fully permissive: accept any caps until configured otherwise. */
  filter->filter_caps = gst_caps_new_any ();
}
コード例 #11
0
/* initialize the new element
 * instantiate pads and add them to element
 * set pad calback functions
 * initialize instance structure
 */
static void
gst_skin_detect_init (GstSkinDetect * filter)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Defaults: HSV method with post-processing enabled. */
  filter->method = HSV;
  filter->postprocess = TRUE;

  /* Result is written to a separate output buffer. */
  gst_base_transform_set_in_place (trans, FALSE);
}
コード例 #12
0
ファイル: gsttidmaiaccel.c プロジェクト: sv99/gst-ti-dmai
/******************************************************************************
 * gst_tidmaiaccel_init
 *****************************************************************************/
static void gst_tidmaiaccel_init (GstTIDmaiaccel *dmaiaccel)
{
    GstBaseTransform *trans = GST_BASE_TRANSFORM (dmaiaccel);

    /* Enable QoS handling in the base transform. */
    gst_base_transform_set_qos_enabled (trans, TRUE);

    /* No format negotiated yet: unset color space, zero geometry, and no
     * buffer table allocated. */
    dmaiaccel->bufTabAllocated = FALSE;
    dmaiaccel->height = 0;
    dmaiaccel->width = 0;
    dmaiaccel->colorSpace = ColorSpace_NOTSET;
}
コード例 #13
0
/* Pad query handler: answers LATENCY queries by forwarding upstream and
 * adding this FIR filter's own processing delay; everything else goes to
 * the default pad query handler. */
static gboolean
gst_audio_fx_base_fir_filter_query (GstPad * pad, GstQuery * query)
{
  /* gst_pad_get_parent() takes a reference; released at the end. */
  GstAudioFXBaseFIRFilter *self =
      GST_AUDIO_FX_BASE_FIR_FILTER (gst_pad_get_parent (pad));
  gboolean res = TRUE;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_LATENCY:
    {
      GstClockTime min, max;
      gboolean live;
      guint64 latency;
      GstPad *peer;
      gint rate = GST_AUDIO_FILTER (self)->format.rate;

      /* Without a negotiated rate we cannot convert samples to time. */
      if (rate == 0) {
        res = FALSE;
      } else if ((peer = gst_pad_get_peer (GST_BASE_TRANSFORM (self)->sinkpad))) {
        if ((res = gst_pad_query (peer, query))) {
          gst_query_parse_latency (query, &live, &min, &max);

          GST_DEBUG_OBJECT (self, "Peer latency: min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          /* FFT block mode (non-low-latency) buffers a whole block minus
           * the kernel overlap; otherwise use the precomputed latency. */
          if (self->fft && !self->low_latency)
            latency = self->block_length - self->kernel_length + 1;
          else
            latency = self->latency;

          /* add our own latency */
          latency = gst_util_uint64_scale_round (latency, GST_SECOND, rate);

          GST_DEBUG_OBJECT (self, "Our latency: %"
              GST_TIME_FORMAT, GST_TIME_ARGS (latency));

          /* max may be unbounded (GST_CLOCK_TIME_NONE); leave it so. */
          min += latency;
          if (max != GST_CLOCK_TIME_NONE)
            max += latency;

          GST_DEBUG_OBJECT (self, "Calculated total latency : min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          gst_query_set_latency (query, live, min, max);
        }
        gst_object_unref (peer);
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
  /* Drop the reference taken by gst_pad_get_parent() above. */
  gst_object_unref (self);
  return res;
}
コード例 #14
0
static void
gst_alpha_color_init (GstMillColor * mc, GstMillColorClass * g_class)
{
  /* Mark the transform as always operating in place on the input buffer.
   * (The intermediate NULL-initialized local in the original was
   * redundant.) */
  GST_BASE_TRANSFORM (mc)->always_in_place = TRUE;
}
コード例 #15
0
ファイル: transform1.c プロジェクト: pexip/gstreamer
static void
toggle_passthrough (gpointer data, gpointer user_data)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (user_data);

  /* Flip passthrough on, yield so other threads can interleave, then
   * flip it back off. */
  gst_base_transform_set_passthrough (trans, TRUE);
  g_thread_yield ();
  gst_base_transform_set_passthrough (trans, FALSE);
}
コード例 #16
0
ファイル: gstcvsmooth.c プロジェクト: joshdoe/gst-opencv
static void
gst_cv_smooth_change_type (GstCvSmooth * filter, gint value)
{
  gboolean in_place;

  GST_DEBUG_OBJECT (filter, "Changing type from %d to %d", filter->type, value);
  /* Nothing to do if the type is unchanged. */
  if (filter->type == value)
    return;

  filter->type = value;

  /* Only CV_GAUSSIAN and CV_BLUR are configured as in-place transforms;
   * every other smoothing type uses a separate output buffer. */
  in_place = (value == CV_GAUSSIAN || value == CV_BLUR);
  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (filter), in_place);
}
コード例 #17
0
static void
gst_cv_sobel_init (GstCvSobel * filter)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Default Sobel derivative orders and aperture size. */
  filter->aperture_size = DEFAULT_APERTURE_SIZE;
  filter->y_order = DEFAULT_Y_ORDER;
  filter->x_order = DEFAULT_X_ORDER;

  /* Output goes to a separate buffer; do not transform in place. */
  gst_base_transform_set_in_place (trans, FALSE);
}
コード例 #18
0
/* initialize the new element
 * instantiate pads and add them to element
 * set pad calback functions
 * initialize instance structure
 */
static void
gst_segmentation_init (GstSegmentation * filter)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Default segmentation parameters; frame counter starts at zero. */
  filter->learning_rate = DEFAULT_LEARNING_RATE;
  filter->framecount = 0;
  filter->test_mode = DEFAULT_TEST_MODE;
  filter->method = DEFAULT_METHOD;

  /* Operate directly on the input buffer. */
  gst_base_transform_set_in_place (trans, TRUE);
}
コード例 #19
0
static void
gst_chromaprint_init (GstChromaprint * chromaprint)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (chromaprint);

  /* Data flows through unmodified; the element only observes it. */
  gst_base_transform_set_passthrough (trans, TRUE);

  chromaprint->max_duration = DEFAULT_MAX_DURATION;
  chromaprint->fingerprint = NULL;
  chromaprint->context = chromaprint_new (CHROMAPRINT_ALGORITHM_DEFAULT);

  gst_chromaprint_reset (chromaprint);
}
コード例 #20
0
static void gst_tcamwhitebalance_init (GstTcamWhitebalance* self)
{
    GstBaseTransform* trans = GST_BASE_TRANSFORM (self);

    /* Operate directly on the input buffer. */
    gst_base_transform_set_in_place (trans, TRUE);

    /* Seed the white-balance state and enable automatic mode. */
    init_wb_values (self);
    self->auto_wb = TRUE;

    /* No image dimensions known yet. */
    self->image_size.height = 0;
    self->image_size.width = 0;
}
コード例 #21
0
static void
gst_ffmpegaudioresample_init (GstFFMpegAudioResample * resample,
    GstFFMpegAudioResampleClass * klass)
{
  /* Clear the bufferalloc handler on the sink pad. */
  gst_pad_set_bufferalloc_function (GST_BASE_TRANSFORM (resample)->sinkpad,
      NULL);

  /* No resampling context yet. */
  resample->res = NULL;
}
コード例 #22
0
static void
gst_progress_report_init (GstProgressReport * report)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (report);

  /* Data flows through unmodified; this element only reports progress. */
  gst_base_transform_set_passthrough (trans, TRUE);

  /* Property defaults; format string is owned by the element. */
  report->format = g_strdup (DEFAULT_FORMAT);
  report->do_query = DEFAULT_DO_QUERY;
  report->silent = DEFAULT_SILENT;
  report->update_freq = DEFAULT_UPDATE_FREQ;
}
コード例 #23
0
ファイル: audioecho.c プロジェクト: TheBigW/gst-plugins-good
static void
gst_audio_echo_init (GstAudioEcho * self, GstAudioEchoClass * klass)
{
  /* Defaults: delay and max_delay of 1, zero intensity and feedback. */
  self->max_delay = 1;
  self->delay = 1;
  self->feedback = 0.0;
  self->intensity = 0.0;

  /* Operate directly on the input buffer. */
  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (self), TRUE);
}
コード例 #24
0
static void
gst_ffmpegscale_init (GstFFMpegScale * scale, GstFFMpegScaleClass * klass)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (scale);

  /* Route events arriving on the src pad through the element's handler. */
  gst_pad_set_event_function (trans->srcpad, gst_ffmpegscale_handle_src_event);

  /* No scaling context yet; pixel format not negotiated. */
  scale->res = NULL;
  scale->pixfmt = PIX_FMT_NB;
}
コード例 #25
0
ファイル: gstofa.c プロジェクト: collects/gst-plugins-bad
static void
gst_ofa_init (GstOFA * ofa, GstOFAClass * g_class)
{
  /* Data flows through unmodified; the element only observes it. */
  gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (ofa), TRUE);

  ofa->record = TRUE;
  ofa->fingerprint = NULL;

  /* Adapter accumulates incoming audio data. */
  ofa->adapter = gst_adapter_new ();
}
コード例 #26
0
static void
gst_rg_limiter_init (GstRgLimiter * filter, GstRgLimiterClass * gclass)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Handle GAP buffers ourselves; never run in passthrough mode. */
  gst_base_transform_set_gap_aware (trans, TRUE);
  gst_base_transform_set_passthrough (trans, FALSE);

  /* Limiting is on by default. */
  filter->enabled = TRUE;
}
コード例 #27
0
static void
gst_audio_panorama_init (GstAudioPanorama * filter)
{
  /* Defaults: centered panorama, psychoacoustic method, no processing
   * function selected and audio info cleared. */
  filter->process = NULL;
  gst_audio_info_init (&filter->info);
  filter->method = METHOD_PSYCHOACOUSTIC;
  filter->panorama = 0;

  /* Handle GAP buffers ourselves. */
  gst_base_transform_set_gap_aware (GST_BASE_TRANSFORM (filter), TRUE);
}
static void webkit_media_playready_decrypt_init(WebKitMediaPlayReadyDecrypt* self)
{
    GstBaseTransform* base = GST_BASE_TRANSFORM(self);

    /* Transform buffers in place, never pass them through untouched, and
     * treat GAP buffers like any other. */
    gst_base_transform_set_gap_aware(base, FALSE);
    gst_base_transform_set_passthrough(base, FALSE);
    gst_base_transform_set_in_place(base, TRUE);

    /* Synchronization primitives used by the element. */
    g_cond_init(&self->condition);
    g_mutex_init(&self->mutex);
}
コード例 #29
0
static void
gst_video_balance_update_properties (GstVideoBalance * videobalance)
{
  GstBaseTransform *base = GST_BASE_TRANSFORM (videobalance);
  gboolean passthrough = gst_video_balance_is_passthrough (videobalance);

  /* Record the passthrough decision directly on the base transform. */
  base->passthrough = passthrough;

  /* Lookup tables are only needed when we actually process frames. */
  if (passthrough)
    return;

  gst_video_balance_update_tables (videobalance);
}
コード例 #30
0
ファイル: gstcvsmooth.c プロジェクト: joshdoe/gst-opencv
/* initialize the new element
 * instantiate pads and add them to element
 * set pad callback functions
 * initialize instance structure
 */
static void
gst_cv_smooth_init (GstCvSmooth * filter, GstCvSmoothClass * gclass)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (filter);

  /* Start from the default smoothing type and parameters. */
  filter->type = DEFAULT_CV_SMOOTH_TYPE;
  filter->param4 = DEFAULT_PARAM4;
  filter->param3 = DEFAULT_PARAM3;
  filter->param2 = DEFAULT_PARAM2;
  filter->param1 = DEFAULT_PARAM1;

  /* Output goes to a separate buffer; do not transform in place. */
  gst_base_transform_set_in_place (trans, FALSE);
}