Example #1
static void
gst_nle_source_setup (GstNleSource * nlesrc)
{
  GstElement *rotate, *videorate, *videoscale, *colorspace, *vident, *cairooverlay, *colorspace2;
  GstElement *audiorate, *audioconvert, *audioresample, *aident;
  GstElement *a_capsfilter, *v_capsfilter, *last;
  GstPad *v_pad, *a_pad;
  GstCaps *v_caps, *a_caps;

  rotate = gst_element_factory_make ("flurotate", NULL);
  videorate = gst_element_factory_make ("videorate", NULL);
  nlesrc->videocrop = gst_element_factory_make ("videocrop", NULL);
  videoscale = gst_element_factory_make ("videoscale", NULL);
  colorspace = gst_element_factory_make ("ffmpegcolorspace", NULL);
  v_capsfilter = gst_element_factory_make ("capsfilter", "video_capsfilter");
  nlesrc->textoverlay = gst_element_factory_make ("textoverlay", NULL);
  cairooverlay = gst_element_factory_make ("cairooverlay", "overlay");
  colorspace2 = gst_element_factory_make ("ffmpegcolorspace", NULL);

  vident = gst_element_factory_make ("identity", NULL);

  v_caps = gst_caps_new_simple ("video/x-raw-yuv",
      "format", GST_TYPE_FOURCC, GST_STR_FOURCC ("I420"),
      "width", G_TYPE_INT, (gint) nlesrc->width,
      "height", G_TYPE_INT, (gint) nlesrc->height,
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
      "framerate", GST_TYPE_FRACTION,
      (gint) nlesrc->fps_n, (gint) nlesrc->fps_d, NULL);

  if (rotate) {
    gst_caps_set_simple (v_caps, "rotation", G_TYPE_INT, (gint) 0, NULL);
  } else {
    rotate = gst_element_factory_make ("identity", NULL); 
  }

  gst_pad_set_caps (nlesrc->video_srcpad, v_caps);

  g_object_set (videoscale, "add-borders", TRUE, NULL);
  g_object_set (vident, "single-segment", TRUE, NULL);
  g_object_set (v_capsfilter, "caps", v_caps, NULL);
  g_object_set (nlesrc->textoverlay, "valignment", 2, "halignment", 0,
      "auto-resize", TRUE, "wrap-mode", 0, "silent", !nlesrc->overlay_title,
      NULL);

  g_signal_connect (cairooverlay, "draw",
      G_CALLBACK (gst_nle_source_draw_overlay), nlesrc);

  /* As videorate can duplicate a lot of buffers we want to put it last in this
     transformation bin */
  gst_bin_add_many (GST_BIN (nlesrc), rotate, nlesrc->videocrop,
      videoscale, colorspace, nlesrc->textoverlay, videorate, v_capsfilter,
      vident, NULL);
  /* cairooverlay forces a colorspace conversion to RGB that we want to avoid
   * when we are not rendering the watermark */
  if (nlesrc->watermark != NULL) {
    gst_bin_add_many (GST_BIN (nlesrc), cairooverlay, colorspace2, NULL);
  }

  gst_element_link_many (rotate, nlesrc->videocrop, videoscale, colorspace,
      nlesrc->textoverlay, NULL);

  if (nlesrc->watermark != NULL) {
    gst_element_link_many (nlesrc->textoverlay, cairooverlay, colorspace2, NULL);
    last = colorspace2;
  } else {
    last = nlesrc->textoverlay;
  }

  gst_element_link_many (last, videorate, v_capsfilter, vident, NULL);

  /* Ghost source and sink pads */
  v_pad = gst_element_get_pad (vident, "src");
  gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->video_srcpad), v_pad);
  gst_object_unref (v_pad);

  v_pad = gst_element_get_pad (rotate, "sink");
  gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->video_sinkpad), v_pad);
  gst_object_unref (v_pad);

  if (nlesrc->with_audio) {
    audiorate = gst_element_factory_make ("audiorate", NULL);
    audioconvert = gst_element_factory_make ("audioconvert", NULL);
    audioresample = gst_element_factory_make ("audioresample", NULL);
    a_capsfilter = gst_element_factory_make ("capsfilter", NULL);
    aident = gst_element_factory_make ("identity", NULL);

    gst_bin_add_many (GST_BIN (nlesrc), audioresample, audioconvert,
        audiorate, a_capsfilter, aident, NULL);
    gst_element_link_many (audioconvert, audioresample,
        audiorate, a_capsfilter, aident, NULL);

    a_caps = gst_nle_source_get_audio_caps (nlesrc);
    gst_pad_set_caps (nlesrc->audio_srcpad, a_caps);
    g_object_set (a_capsfilter, "caps", a_caps, NULL);

    g_object_set (aident, "single-segment", TRUE, NULL);

    /* Ghost sink and source pads */
    a_pad = gst_element_get_pad (aident, "src");
    gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->audio_srcpad), a_pad);
    gst_object_unref (a_pad);

    a_pad = gst_element_get_pad (audioconvert, "sink");
    gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->audio_sinkpad), a_pad);
    gst_object_unref (a_pad);
  }
  nlesrc->index = -1;
  nlesrc->accu_time = 0;
  nlesrc->video_srcpad_added = FALSE;
  nlesrc->audio_srcpad_added = FALSE;
}
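
The two gst_ghost_pad_set_target() calls above only work because nlesrc->video_srcpad and nlesrc->video_sinkpad were created without a target earlier on. A minimal sketch of that pattern, with hypothetical names (the real pad-creation code is not shown in this example):

static void
expose_bin_src (GstBin * bin, GstElement * last_element)
{
  GstPad *ghost, *target;

  /* created early, e.g. in the instance init function, before the
   * internal elements exist */
  ghost = gst_ghost_pad_new_no_target ("src", GST_PAD_SRC);
  gst_element_add_pad (GST_ELEMENT (bin), ghost);

  /* retargeted once the internal chain is linked */
  target = gst_element_get_static_pad (last_element, "src");
  gst_ghost_pad_set_target (GST_GHOST_PAD (ghost), target);
  gst_object_unref (target);
}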
static GstFlowReturn
gst_speex_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstSpeexEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;

  enc = GST_SPEEX_ENC (benc);

  if (!enc->header_sent) {
    /* Speex streams begin with two headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.
       We merely need to make the headers, then pass them to libspeex 
       one at a time; libspeex handles the additional Ogg bitstream 
       constraints */
    GstBuffer *buf1, *buf2;
    GstCaps *caps;
    guchar *data;
    gint data_len;

    /* create header buffer */
    data = (guint8 *) speex_header_to_packet (&enc->header, &data_len);
    buf1 = gst_buffer_new ();
    GST_BUFFER_DATA (buf1) = GST_BUFFER_MALLOCDATA (buf1) = data;
    GST_BUFFER_SIZE (buf1) = data_len;
    GST_BUFFER_OFFSET_END (buf1) = 0;
    GST_BUFFER_OFFSET (buf1) = 0;

    /* create comment buffer */
    buf2 = gst_speex_enc_create_metadata_buffer (enc);

    /* mark and put on caps */
    caps = gst_caps_new_simple ("audio/x-speex", "rate", G_TYPE_INT, enc->rate,
        "channels", G_TYPE_INT, enc->channels, NULL);
    caps = _gst_caps_set_buffer_array (caps, "streamheader", buf1, buf2, NULL);

    /* negotiate with these caps */
    GST_DEBUG_OBJECT (enc, "here are the caps: %" GST_PTR_FORMAT, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_caps_unref (caps);

    /* push out buffers */
    ret = gst_speex_enc_push_buffer (enc, buf1);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      goto done;
    }

    ret = gst_speex_enc_push_buffer (enc, buf2);

    if (ret != GST_FLOW_OK)
      goto done;

    enc->header_sent = TRUE;
  }

  GST_DEBUG_OBJECT (enc, "received buffer %p of %u bytes", buf,
      buf ? GST_BUFFER_SIZE (buf) : 0);

  ret = gst_speex_enc_encode (enc, buf);

done:
  return ret;
}
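
_gst_caps_set_buffer_array() is a static helper from gstspeexenc.c that is not shown here. A minimal sketch of what it presumably does, assuming the same 0.10 API used above: pack copies of the header buffers into a GST_TYPE_ARRAY-valued "streamheader" field, flagging each copy as in-caps:

static GstCaps *
set_stream_headers (GstCaps * caps, GstBuffer * buf1, GstBuffer * buf2)
{
  GstStructure *structure;
  GstBuffer *bufs[2] = { buf1, buf2 };
  GValue array = { 0 };
  GValue value = { 0 };
  gint i;

  caps = gst_caps_make_writable (caps);
  structure = gst_caps_get_structure (caps, 0);

  g_value_init (&array, GST_TYPE_ARRAY);
  for (i = 0; i < 2; i++) {
    GstBuffer *copy = gst_buffer_copy (bufs[i]);

    /* muxers and typefinders expect stream headers to carry this flag */
    GST_BUFFER_FLAG_SET (copy, GST_BUFFER_FLAG_IN_CAPS);

    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, copy);
    gst_buffer_unref (copy);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
  }
  gst_structure_set_value (structure, "streamheader", &array);
  g_value_unset (&array);

  return caps;
}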
static GstCaps *
gst_dshowvideodec_src_getcaps (GstPad * pad)
{
  GstDshowVideoDec *vdec = (GstDshowVideoDec *) gst_pad_get_parent (pad);
  GstCaps *caps = NULL;

  if (!vdec->srccaps)
    vdec->srccaps = gst_caps_new_empty ();

  if (vdec->decfilter) {
    CComPtr<IPin> output_pin;
    CComPtr<IEnumMediaTypes> enum_mediatypes;
    HRESULT hres;
    ULONG fetched;

    output_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_OUTPUT);
    if (!output_pin) {
      GST_ELEMENT_ERROR (vdec, STREAM, FAILED,
          ("failed getting ouput pin from the decoder"), (NULL));
      goto beach;
    }

    hres = output_pin->EnumMediaTypes (&enum_mediatypes);
    if (hres == S_OK && enum_mediatypes) {
      AM_MEDIA_TYPE *mediatype = NULL;

      enum_mediatypes->Reset();
      while ((hres =
              enum_mediatypes->Next (1, &mediatype, &fetched)) == S_OK)
      {
        VIDEOINFOHEADER *video_info;
        GstCaps *mediacaps = NULL;

        /* RGB24 */
        if (IsEqualGUID (mediatype->subtype, MEDIASUBTYPE_RGB24) &&
            IsEqualGUID (mediatype->formattype, FORMAT_VideoInfo))
        {
          video_info = (VIDEOINFOHEADER *) mediatype->pbFormat;

          /* ffmpegcolorspace handles RGB24 in BIG_ENDIAN */
          mediacaps = gst_caps_new_simple ("video/x-raw-rgb",
              "bpp", G_TYPE_INT, 24,
              "depth", G_TYPE_INT, 24,
              "width", G_TYPE_INT, video_info->bmiHeader.biWidth,
              "height", G_TYPE_INT, video_info->bmiHeader.biHeight,
              "framerate", GST_TYPE_FRACTION,
              (int) (10000000 / video_info->AvgTimePerFrame), 1, "endianness",
              G_TYPE_INT, G_BIG_ENDIAN, "red_mask", G_TYPE_INT, 255,
              "green_mask", G_TYPE_INT, 65280, "blue_mask", G_TYPE_INT,
              16711680, NULL);

          if (mediacaps) {
            vdec->mediatypes = g_list_append (vdec->mediatypes, mediatype);
            gst_caps_append (vdec->srccaps, mediacaps);
          } else {
            DeleteMediaType (mediatype);
          }
        } else {
          DeleteMediaType (mediatype);
        }

      }
    }
  }

  if (vdec->srccaps)
    caps = gst_caps_ref (vdec->srccaps);

beach:
  gst_object_unref (vdec);

  return caps;
}
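
One detail worth noting in the caps built above: AvgTimePerFrame is a duration in 100 ns units, and the integer division 10000000 / AvgTimePerFrame truncates non-integer rates (NTSC's 30000/1001 comes out as 29). A minimal sketch of a more exact conversion, using gst_util_double_to_fraction() from GStreamer core (helper name hypothetical):

static void
framerate_from_avg_time_per_frame (gint64 avg_time_per_frame,
    gint * fps_n, gint * fps_d)
{
  /* AvgTimePerFrame is in 100 ns units; derive an exact fraction */
  gst_util_double_to_fraction (10000000.0 / (gdouble) avg_time_per_frame,
      fps_n, fps_d);
}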
Example #4
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
{
  GstCaps *caps;
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GList *walk;
  guint32 fourcc;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  if (dec->have_par) {
    /* we had a par on the sink caps, override the encoded par */
    GST_DEBUG_OBJECT (dec, "overriding with input PAR");
    par_num = dec->par_num;
    par_den = dec->par_den;
  } else {
    /* take encoded par */
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  if (par_num == 0 || par_den == 0) {
    par_num = par_den = 1;
  }
  /* theora has:
   *
   *  width/height : dimension of the encoded frame 
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  if (dec->info.pixel_fmt == TH_PF_420) {
    dec->output_bpp = 12;       /* Average bits per pixel. */
    fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
  } else if (dec->info.pixel_fmt == TH_PF_422) {
    dec->output_bpp = 16;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
  } else if (dec->info.pixel_fmt == TH_PF_444) {
    dec->output_bpp = 24;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '4', '4');
  } else {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }

  if (dec->crop) {
    dec->width = dec->info.pic_width;
    dec->height = dec->info.pic_height;
    dec->offset_x = dec->info.pic_x;
    dec->offset_y = dec->info.pic_y;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (dec->offset_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
      dec->offset_x--;
      dec->width++;
    }
    if (dec->offset_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
      dec->offset_y--;
      dec->height++;
    }
  } else {
    /* no cropping, use the encoded dimensions */
    dec->width = dec->info.frame_width;
    dec->height = dec->info.frame_height;
    dec->offset_x = 0;
    dec->offset_y = 0;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      dec->width, dec->height, dec->offset_x, dec->offset_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  caps = gst_caps_new_simple ("video/x-raw-yuv",
      "format", GST_TYPE_FOURCC, fourcc,
      "framerate", GST_TYPE_FRACTION,
      dec->info.fps_numerator, dec->info.fps_denominator,
      "pixel-aspect-ratio", GST_TYPE_FRACTION, par_num, par_den,
      "width", G_TYPE_INT, dec->width, "height", G_TYPE_INT, dec->height,
      "color-matrix", G_TYPE_STRING, "sdtv",
      "chroma-site", G_TYPE_STRING, "jpeg", NULL);
  gst_pad_set_caps (dec->srcpad, caps);
  gst_caps_unref (caps);

  dec->have_header = TRUE;

  if (dec->pendingevents) {
    for (walk = dec->pendingevents; walk; walk = g_list_next (walk))
      gst_pad_push_event (dec->srcpad, GST_EVENT_CAST (walk->data));
    g_list_free (dec->pendingevents);
    dec->pendingevents = NULL;
  }

  if (dec->tags) {
    gst_element_found_tags_for_pad (GST_ELEMENT_CAST (dec), dec->srcpad,
        dec->tags);
    dec->tags = NULL;
  }

  return ret;
}
/* create a socket for connecting to remote server */
static gboolean
gst_neonhttp_src_start (GstBaseSrc * bsrc)
{
  GstNeonhttpSrc *src = GST_NEONHTTP_SRC (bsrc);
  const gchar *content_length;
  gint res;

#ifndef GST_DISABLE_GST_DEBUG
  if (src->neon_http_debug)
    ne_debug_init (stderr, NE_DBG_HTTP);
#endif

  ne_oom_callback (oom_callback);

  res = ne_sock_init ();
  if (res != 0)
    goto init_failed;

  res = gst_neonhttp_src_send_request_and_redirect (src,
      &src->session, &src->request, 0, src->automatic_redirect);

  if (res != NE_OK || !src->session) {
    if (res == HTTP_SOCKET_ERROR) {
      goto socket_error;
    } else if (res == HTTP_REQUEST_WRONG_PROXY) {
      goto wrong_proxy;
    } else {
      goto begin_req_failed;
    }
  }

  content_length = ne_get_response_header (src->request, "Content-Length");

  if (content_length)
    src->content_size = g_ascii_strtoull (content_length, NULL, 10);
  else
    src->content_size = -1;

  if (src->iradio_mode) {
    /* Icecast stuff */
    const gchar *str_value;
    gint gint_value;

    str_value = ne_get_response_header (src->request, "icy-metaint");
    if (str_value) {
      if (sscanf (str_value, "%d", &gint_value) == 1) {
        if (src->icy_caps) {
          gst_caps_unref (src->icy_caps);
          src->icy_caps = NULL;
        }
        src->icy_metaint = gint_value;
        src->icy_caps = gst_caps_new_simple ("application/x-icy",
            "metadata-interval", G_TYPE_INT, src->icy_metaint, NULL);
      }
    }

    str_value = ne_get_response_header (src->request, "icy-name");
    if (str_value) {
      if (src->iradio_name) {
        g_free (src->iradio_name);
        src->iradio_name = NULL;
      }
      src->iradio_name = gst_neonhttp_src_unicodify (str_value);
    }
    str_value = ne_get_response_header (src->request, "icy-genre");
    if (str_value) {
      if (src->iradio_genre) {
        g_free (src->iradio_genre);
        src->iradio_genre = NULL;
      }
      src->iradio_genre = gst_neonhttp_src_unicodify (str_value);
    }
    str_value = ne_get_response_header (src->request, "icy-url");
    if (str_value) {
      if (src->iradio_url) {
        g_free (src->iradio_url);
        src->iradio_url = NULL;
      }
      src->iradio_url = gst_neonhttp_src_unicodify (str_value);
    }
  }

  return TRUE;

  /* ERRORS */
init_failed:
  {
    GST_ELEMENT_ERROR (src, LIBRARY, INIT, (NULL),
        ("ne_sock_init() failed: %d", res));
    return FALSE;
  }
socket_error:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, OPEN_READ, (NULL),
        ("HTTP Request failed when opening socket: %d", res));
    return FALSE;
  }
wrong_proxy:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, SETTINGS, (NULL),
        ("Proxy Server URI is invalid - make sure that either both proxy host "
            "and port are specified or neither."));
    return FALSE;
  }
begin_req_failed:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, OPEN_READ, (NULL),
        ("Could not begin request: %d", res));
    return FALSE;
  }
}
Example #6
static void
setup (void)
{
  list = gst_buffer_list_new ();
  caps = gst_caps_new_simple ("text/plain", NULL);
}
static GstCaps *
gst_dshowaudiosrc_getcaps_from_streamcaps (GstDshowAudioSrc * src, IPin * pin,
    IAMStreamConfig * streamcaps)
{
  GstCaps *caps = NULL;
  HRESULT hres = S_OK;
  int icount = 0;
  int isize = 0;
  AUDIO_STREAM_CONFIG_CAPS ascc;
  int i = 0;

  if (!streamcaps)
    return NULL;

  streamcaps->GetNumberOfCapabilities (&icount, &isize);

  if (isize != sizeof (ascc))
    return NULL;

  for (; i < icount; i++) {
    GstCapturePinMediaType *pin_mediatype = g_new0 (GstCapturePinMediaType, 1);

    pin->AddRef ();
    pin_mediatype->capture_pin = pin;

    hres = streamcaps->GetStreamCaps (i, &pin_mediatype->mediatype,
        (BYTE *) & ascc);
    if (hres == S_OK && pin_mediatype->mediatype) {
      GstCaps *mediacaps = NULL;

      if (!caps)
        caps = gst_caps_new_empty ();

      if (gst_dshow_check_mediatype (pin_mediatype->mediatype, MEDIASUBTYPE_PCM,
              FORMAT_WaveFormatEx)) {
        WAVEFORMATEX *wavformat =
            (WAVEFORMATEX *) pin_mediatype->mediatype->pbFormat;
        mediacaps =
            gst_caps_new_simple ("audio/x-raw-int", "width", G_TYPE_INT,
            wavformat->wBitsPerSample, "depth", G_TYPE_INT,
            wavformat->wBitsPerSample, "endianness", G_TYPE_INT, G_BYTE_ORDER,
            "signed", G_TYPE_BOOLEAN, TRUE, "channels", G_TYPE_INT,
            wavformat->nChannels, "rate", G_TYPE_INT, wavformat->nSamplesPerSec,
            NULL);

        if (mediacaps) {
          src->pins_mediatypes =
              g_list_append (src->pins_mediatypes, pin_mediatype);
          gst_caps_append (caps, mediacaps);
        } else {
          gst_dshow_free_pin_mediatype (pin_mediatype);
        }
      } else {
        gst_dshow_free_pin_mediatype (pin_mediatype);
      }
    } else {
      gst_dshow_free_pin_mediatype (pin_mediatype);
    }
  }

  if (caps && gst_caps_is_empty (caps)) {
    gst_caps_unref (caps);
    caps = NULL;
  }

  return caps;
}
static GstBuffer *
gst_rtp_qdm2_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
  GstRtpQDM2Depay *rtpqdm2depay;
  GstBuffer *outbuf = NULL;
  guint16 seq;
  GstRTPBuffer rtp = { NULL };

  rtpqdm2depay = GST_RTP_QDM2_DEPAY (depayload);

  {
    gint payload_len;
    guint8 *payload;
    guint avail;
    guint pos = 0;

    gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);
    payload_len = gst_rtp_buffer_get_payload_len (&rtp);
    if (payload_len < 3)
      goto bad_packet;

    payload = gst_rtp_buffer_get_payload (&rtp);
    seq = gst_rtp_buffer_get_seq (&rtp);
    if (G_UNLIKELY (seq != rtpqdm2depay->nextseq)) {
      GST_DEBUG ("GAP in sequence number, Resetting data !");
      /* Flush previous data */
      flush_data (rtpqdm2depay);
      /* And store new timestamp */
      rtpqdm2depay->ptimestamp = rtpqdm2depay->timestamp;
      rtpqdm2depay->timestamp = GST_BUFFER_TIMESTAMP (buf);
      /* And that previous data will be pushed at the bottom */
    }
    rtpqdm2depay->nextseq = seq + 1;

    GST_DEBUG ("Payload size %d 0x%x sequence:%d", payload_len, payload_len,
        seq);

    GST_MEMDUMP ("Incoming payload", payload, payload_len);

    while (pos < payload_len) {
      switch (payload[pos]) {
        case 0x80:{
          GST_DEBUG ("Unrecognized 0x80 marker, skipping 12 bytes");
          pos += 12;
        }
          break;
        case 0xff:
          /* HEADERS */
          GST_DEBUG ("Headers");
          /* Store the incoming timestamp */
          rtpqdm2depay->ptimestamp = rtpqdm2depay->timestamp;
          rtpqdm2depay->timestamp = GST_BUFFER_TIMESTAMP (buf);
          /* flush the internal data if needed */
          flush_data (rtpqdm2depay);
          if (G_UNLIKELY (!rtpqdm2depay->configured)) {
            guint8 *ourdata;
            GstBuffer *codecdata;
            GstMapInfo cmap;
            GstCaps *caps;

            /* First bytes are unknown */
            GST_MEMDUMP ("Header", payload + pos, 32);
            ourdata = payload + pos + 10;
            pos += 10;
            rtpqdm2depay->channs = GST_READ_UINT32_BE (payload + pos + 4);
            rtpqdm2depay->samplerate = GST_READ_UINT32_BE (payload + pos + 8);
            rtpqdm2depay->bitrate = GST_READ_UINT32_BE (payload + pos + 12);
            rtpqdm2depay->blocksize = GST_READ_UINT32_BE (payload + pos + 16);
            rtpqdm2depay->framesize = GST_READ_UINT32_BE (payload + pos + 20);
            rtpqdm2depay->packetsize = GST_READ_UINT32_BE (payload + pos + 24);
            /* 16 bit empty block (0x02 0x00) */
            pos += 30;
            GST_DEBUG
                ("channs:%d, samplerate:%d, bitrate:%d, blocksize:%d, framesize:%d, packetsize:%d",
                rtpqdm2depay->channs, rtpqdm2depay->samplerate,
                rtpqdm2depay->bitrate, rtpqdm2depay->blocksize,
                rtpqdm2depay->framesize, rtpqdm2depay->packetsize);

            /* Caps */
            codecdata = gst_buffer_new_and_alloc (48);
            gst_buffer_map (codecdata, &cmap, GST_MAP_WRITE);
            memcpy (cmap.data, headheader, 20);
            memcpy (cmap.data + 20, ourdata, 28);
            gst_buffer_unmap (codecdata, &cmap);

            caps = gst_caps_new_simple ("audio/x-qdm2",
                "samplesize", G_TYPE_INT, 16,
                "rate", G_TYPE_INT, rtpqdm2depay->samplerate,
                "channels", G_TYPE_INT, rtpqdm2depay->channs,
                "codec_data", GST_TYPE_BUFFER, codecdata, NULL);
            gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), caps);
            gst_caps_unref (caps);
            rtpqdm2depay->configured = TRUE;
          } else {
            GST_DEBUG ("Already configured, skipping headers");
            pos += 40;
          }
          break;
        default:{
          /* Shuffled packet contents */
          guint packetid = payload[pos++];
          guint packettype = payload[pos++];
          guint packlen = payload[pos++];
          guint hsize = 2;

          GST_DEBUG ("Packet id:%d, type:0x%x, len:%d",
              packetid, packettype, packlen);

          /* Packets bigger than 0xff bytes have a type with the high bit set */
          if (G_UNLIKELY (packettype & 0x80)) {
            packettype &= 0x7f;
            packlen <<= 8;
            packlen |= payload[pos++];
            hsize = 3;
            GST_DEBUG ("Packet id:%d, type:0x%x, len:%d",
                packetid, packettype, packlen);
          }

          if (packettype > 0x7f) {
            GST_ERROR ("HOUSTON WE HAVE A PROBLEM !!!!");
          }
          add_packet (rtpqdm2depay, packetid, packlen + hsize,
              payload + pos - hsize);
          pos += packlen;
        }
      }
    }

    GST_DEBUG ("final pos %d", pos);

    avail = gst_adapter_available (rtpqdm2depay->adapter);
    if (G_UNLIKELY (avail)) {
      GST_DEBUG ("Pushing out %d bytes of collected data", avail);
      outbuf = gst_adapter_take_buffer (rtpqdm2depay->adapter, avail);
      GST_BUFFER_TIMESTAMP (outbuf) = rtpqdm2depay->ptimestamp;
      GST_DEBUG ("Outgoing buffer timestamp %" GST_TIME_FORMAT,
          GST_TIME_ARGS (rtpqdm2depay->ptimestamp));
    }
  }

  gst_rtp_buffer_unmap (&rtp);
  return outbuf;

  /* ERRORS */
bad_packet:
  {
    GST_ELEMENT_WARNING (rtpqdm2depay, STREAM, DECODE,
        (NULL), ("Packet was too short"));
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
}
Example #9
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstCaps *caps = NULL, *res = NULL;
  GstPadTemplate *templ = NULL;
  gint bpc = 0, color_type;
  png_uint_32 width, height;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* We don't handle 16 bits per color, strip down to 8 */
  if (bpc == 16) {
    GST_LOG_OBJECT (pngdec,
        "this is a 16 bits per channel PNG image, strip down to 8 bits");
    png_set_strip_16 (pngdec->png);
  }

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in ffmpegcolorspace's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale converted to RGB and upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    GST_LOG_OBJECT (pngdec, "converting grayscale png to RGB");
    png_set_gray_to_rgb (pngdec->png);
    if (bpc < 8) {              /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */

  png_get_IHDR (pngdec->png, pngdec->info, &width, &height,
      &bpc, &pngdec->color_type, NULL, NULL, NULL);

  pngdec->width = width;
  pngdec->height = height;

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", pngdec->width,
      pngdec->height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      pngdec->bpp = 24;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec, "we have an alpha channel, depth is 32 bits");
      pngdec->bpp = 32;
      break;
    default:
      GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
          ("pngdec does not support this color type"));
      ret = GST_FLOW_NOT_SUPPORTED;
      goto beach;
  }

  caps = gst_caps_new_simple ("video/x-raw-rgb",
      "width", G_TYPE_INT, pngdec->width,
      "height", G_TYPE_INT, pngdec->height,
      "bpp", G_TYPE_INT, pngdec->bpp,
      "framerate", GST_TYPE_FRACTION, pngdec->fps_n, pngdec->fps_d, NULL);

  templ = gst_static_pad_template_get (&gst_pngdec_src_pad_template);

  res = gst_caps_intersect (caps, gst_pad_template_get_caps (templ));

  gst_caps_unref (caps);
  gst_object_unref (templ);

  if (!gst_pad_set_caps (pngdec->srcpad, res))
    ret = GST_FLOW_NOT_NEGOTIATED;

  GST_DEBUG_OBJECT (pngdec, "our caps %" GST_PTR_FORMAT, res);

  gst_caps_unref (res);

  /* Push a newsegment event */
  if (pngdec->need_newsegment) {
    gst_pad_push_event (pngdec->srcpad,
        gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0));
    pngdec->need_newsegment = FALSE;
  }

beach:
  return ret;
}
Example #10
GstCaps * _owr_payload_create_rtp_caps(OwrPayload *payload)
{
    OwrPayloadPrivate *priv;
    GstCaps *caps = NULL;
    GEnumClass *enum_class;
    GEnumValue *enum_value;
    gchar *encoding_name;
    gboolean ccm_fir = FALSE, nack_pli = FALSE;

    g_return_val_if_fail(payload, NULL);
    priv = payload->priv;

    switch (priv->codec_type) {
    case OWR_CODEC_TYPE_PCMU:
        encoding_name = "PCMU";
        break;

    case OWR_CODEC_TYPE_PCMA:
        encoding_name = "PCMA";
        break;

    case OWR_CODEC_TYPE_OPUS:
        encoding_name = "X-GST-OPUS-DRAFT-SPITTKA-00";
        break;

    case OWR_CODEC_TYPE_H264:
        encoding_name = "H264";
        break;

    case OWR_CODEC_TYPE_VP8:
        encoding_name = "VP8-DRAFT-IETF-01";
        break;

    default:
        g_return_val_if_reached(NULL);
    }

    caps = gst_caps_new_simple("application/x-rtp",
        "encoding-name", G_TYPE_STRING, encoding_name,
        "payload", G_TYPE_INT, priv->payload_type,
        "clock-rate", G_TYPE_INT, priv->clock_rate,
        NULL);
    enum_class = G_ENUM_CLASS(g_type_class_ref(OWR_TYPE_MEDIA_TYPE));
    enum_value = g_enum_get_value(enum_class, priv->media_type);
    if (enum_value)
        gst_caps_set_simple(caps, "media", G_TYPE_STRING, enum_value->value_nick, NULL);
    g_type_class_unref(enum_class);

    if (OWR_IS_VIDEO_PAYLOAD(payload)) {
        g_object_get(OWR_VIDEO_PAYLOAD(payload), "ccm-fir", &ccm_fir, "nack-pli", &nack_pli, NULL);
        gst_caps_set_simple(caps, "rtcp-fb-ccm-fir", G_TYPE_BOOLEAN, ccm_fir, "rtcp-fb-nack-pli",
            G_TYPE_BOOLEAN, nack_pli, NULL);
    } else if (priv->media_type == OWR_MEDIA_TYPE_AUDIO) {
        guint channels = 0;
        if (OWR_IS_AUDIO_PAYLOAD(payload))
            g_object_get(OWR_AUDIO_PAYLOAD(payload), "channels", &channels, NULL);
        if (channels > 0) {
            gst_caps_set_simple(caps,
                "channels", G_TYPE_INT, channels,
                NULL);
        }
    }

    return caps;
}
Example #11
static int
gst_dv1394src_iso_receive (raw1394handle_t handle, int channel, size_t len,
    quadlet_t * data)
{
  GstDV1394Src *dv1394src = gst_dv1394src_from_raw1394handle (handle);

  if (len > 16) {
    /*
       the following code taken from kino-0.51 (Dan Dennedy/Charles Yates)
       Kindly relicensed under the LGPL. See the commit log for version 1.6 of
       this file in CVS.
     */
    unsigned char *p = (unsigned char *) &data[3];

    int section_type = p[0] >> 5;       /* section type is in bits 5 - 7 */
    int dif_sequence = p[1] >> 4;       /* dif sequence number is in bits 4 - 7 */
    int dif_block = p[2];

    /* if we are at the beginning of a frame, 
       we set buf=frame, and alloc a new buffer for frame
     */
    if (section_type == 0 && dif_sequence == 0) {       // dif header
      if (!GST_PAD_CAPS (GST_BASE_SRC_PAD (dv1394src))) {
        GstCaps *caps;

        // figure format (NTSC/PAL)
        if (p[3] & 0x80) {
          // PAL
          dv1394src->frame_size = PAL_FRAMESIZE;
          dv1394src->frame_rate = PAL_FRAMERATE;
          GST_DEBUG ("PAL data");
          caps = gst_caps_new_simple ("video/x-dv",
              "format", G_TYPE_STRING, "PAL",
              "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
        } else {
          // NTSC (untested)
          dv1394src->frame_size = NTSC_FRAMESIZE;
          dv1394src->frame_rate = NTSC_FRAMERATE;
          GST_DEBUG
              ("NTSC data [untested] - please report success/failure to <*****@*****.**>");
          caps = gst_caps_new_simple ("video/x-dv",
              "format", G_TYPE_STRING, "NTSC",
              "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
        }
        gst_pad_set_caps (GST_BASE_SRC_PAD (dv1394src), caps);
        gst_caps_unref (caps);
      }
      // drop last frame when not complete
      if (!dv1394src->drop_incomplete
          || dv1394src->bytes_in_frame == dv1394src->frame_size) {
        dv1394src->buf = dv1394src->frame;
      } else {
        GST_INFO_OBJECT (GST_ELEMENT (dv1394src), "incomplete frame dropped");
        g_signal_emit (G_OBJECT (dv1394src),
            gst_dv1394src_signals[SIGNAL_FRAME_DROPPED], 0);
        if (dv1394src->frame) {
          gst_buffer_unref (dv1394src->frame);
        }
      }
      if ((dv1394src->frame_sequence + 1) % (dv1394src->skip +
              dv1394src->consecutive) < dv1394src->consecutive) {
        GstBuffer *buf;
        gint64 i64;

        buf = gst_buffer_new_and_alloc (dv1394src->frame_size);

        /* fill in offset, duration, timestamp */
        GST_BUFFER_OFFSET (buf) = dv1394src->frame_sequence;
        dv1394src->frame = buf;
      }
      dv1394src->frame_sequence++;
      dv1394src->bytes_in_frame = 0;
    }

    if (dv1394src->frame != NULL) {
      guint8 *data = GST_BUFFER_DATA (dv1394src->frame);

      switch (section_type) {
        case 0:                /* 1 Header block */
          /* p[3] |= 0x80; // hack to force PAL data */
          memcpy (data + dif_sequence * 150 * 80, p, 480);
          break;

        case 1:                /* 2 Subcode blocks */
          memcpy (data + dif_sequence * 150 * 80 + (1 + dif_block) * 80, p,
              480);
          break;

        case 2:                /* 3 VAUX blocks */
          memcpy (data + dif_sequence * 150 * 80 + (3 + dif_block) * 80, p,
              480);
          break;

        case 3:                /* 9 Audio blocks interleaved with video */
          memcpy (data + dif_sequence * 150 * 80 + (6 + dif_block * 16) * 80, p,
              480);
          break;

        case 4:                /* 135 Video blocks interleaved with audio */
          memcpy (data + dif_sequence * 150 * 80 + (7 + (dif_block / 15) +
                  dif_block) * 80, p, 480);
          break;

        default:               /* we can't handle any other data */
          break;
      }
      dv1394src->bytes_in_frame += 480;
    }
  }

  return 0;
}
static gboolean
gst_rtp_amr_depay_setcaps (GstRTPBaseDepayload * depayload, GstCaps * caps)
{
  GstStructure *structure;
  GstCaps *srccaps;
  GstRtpAMRDepay *rtpamrdepay;
  const gchar *params;
  const gchar *str, *type;
  gint clock_rate, need_clock_rate;
  gboolean res;

  rtpamrdepay = GST_RTP_AMR_DEPAY (depayload);

  structure = gst_caps_get_structure (caps, 0);

  /* figure out the mode first and set the clock rates */
  if ((str = gst_structure_get_string (structure, "encoding-name"))) {
    if (strcmp (str, "AMR") == 0) {
      rtpamrdepay->mode = GST_RTP_AMR_DP_MODE_NB;
      need_clock_rate = 8000;
      type = "audio/AMR";
    } else if (strcmp (str, "AMR-WB") == 0) {
      rtpamrdepay->mode = GST_RTP_AMR_DP_MODE_WB;
      need_clock_rate = 16000;
      type = "audio/AMR-WB";
    } else
      goto invalid_mode;
  } else
    goto invalid_mode;

  if (!(str = gst_structure_get_string (structure, "octet-align")))
    rtpamrdepay->octet_align = FALSE;
  else
    rtpamrdepay->octet_align = (atoi (str) == 1);

  if (!(str = gst_structure_get_string (structure, "crc")))
    rtpamrdepay->crc = FALSE;
  else
    rtpamrdepay->crc = (atoi (str) == 1);

  if (rtpamrdepay->crc) {
    /* crc mode implies octet aligned mode */
    rtpamrdepay->octet_align = TRUE;
  }

  if (!(str = gst_structure_get_string (structure, "robust-sorting")))
    rtpamrdepay->robust_sorting = FALSE;
  else
    rtpamrdepay->robust_sorting = (atoi (str) == 1);

  if (rtpamrdepay->robust_sorting) {
    /* robust_sorting mode implies octet aligned mode */
    rtpamrdepay->octet_align = TRUE;
  }

  if (!(str = gst_structure_get_string (structure, "interleaving")))
    rtpamrdepay->interleaving = FALSE;
  else
    rtpamrdepay->interleaving = (atoi (str) == 1);

  if (rtpamrdepay->interleaving) {
    /* interleaving mode implies octet aligned mode */
    rtpamrdepay->octet_align = TRUE;
  }

  if (!(params = gst_structure_get_string (structure, "encoding-params")))
    rtpamrdepay->channels = 1;
  else {
    rtpamrdepay->channels = atoi (params);
  }

  if (!gst_structure_get_int (structure, "clock-rate", &clock_rate))
    clock_rate = need_clock_rate;
  depayload->clock_rate = clock_rate;

  /* we require 1 channel, 8000 Hz, octet aligned, no CRC,
   * no robust sorting, no interleaving for now */
  if (rtpamrdepay->channels != 1)
    return FALSE;
  if (clock_rate != need_clock_rate)
    return FALSE;
  if (rtpamrdepay->octet_align != TRUE)
    return FALSE;
  if (rtpamrdepay->robust_sorting != FALSE)
    return FALSE;
  if (rtpamrdepay->interleaving != FALSE)
    return FALSE;

  srccaps = gst_caps_new_simple (type,
      "channels", G_TYPE_INT, rtpamrdepay->channels,
      "rate", G_TYPE_INT, clock_rate, NULL);
  res = gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), srccaps);
  gst_caps_unref (srccaps);

  return res;

  /* ERRORS */
invalid_mode:
  {
    GST_ERROR_OBJECT (rtpamrdepay, "invalid encoding-name");
    return FALSE;
  }
}
Example #13
static gboolean
gst_rdt_depay_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure;
  GstRDTDepay *rdtdepay;
  GstCaps *srccaps;
  gint clock_rate = 1000;       /* default */
  const GValue *value;
  GstBuffer *header;

  rdtdepay = GST_RDT_DEPAY (GST_PAD_PARENT (pad));

  structure = gst_caps_get_structure (caps, 0);

  if (gst_structure_has_field (structure, "clock-rate"))
    gst_structure_get_int (structure, "clock-rate", &clock_rate);

  /* config contains the RealMedia header as a buffer. */
  value = gst_structure_get_value (structure, "config");
  if (!value)
    goto no_header;

  header = gst_value_get_buffer (value);
  if (!header)
    goto no_header;

  /* get other values for newsegment */
  value = gst_structure_get_value (structure, "npt-start");
  if (value && G_VALUE_HOLDS_UINT64 (value))
    rdtdepay->npt_start = g_value_get_uint64 (value);
  else
    rdtdepay->npt_start = 0;
  GST_DEBUG_OBJECT (rdtdepay, "NPT start %" G_GUINT64_FORMAT,
      rdtdepay->npt_start);

  value = gst_structure_get_value (structure, "npt-stop");
  if (value && G_VALUE_HOLDS_UINT64 (value))
    rdtdepay->npt_stop = g_value_get_uint64 (value);
  else
    rdtdepay->npt_stop = -1;

  GST_DEBUG_OBJECT (rdtdepay, "NPT stop %" G_GUINT64_FORMAT,
      rdtdepay->npt_stop);

  value = gst_structure_get_value (structure, "play-speed");
  if (value && G_VALUE_HOLDS_DOUBLE (value))
    rdtdepay->play_speed = g_value_get_double (value);
  else
    rdtdepay->play_speed = 1.0;

  value = gst_structure_get_value (structure, "play-scale");
  if (value && G_VALUE_HOLDS_DOUBLE (value))
    rdtdepay->play_scale = g_value_get_double (value);
  else
    rdtdepay->play_scale = 1.0;

  /* caps seem good, configure element */
  rdtdepay->clock_rate = clock_rate;

  /* set caps on pad and on header */
  srccaps = gst_caps_new_simple ("application/vnd.rn-realmedia", NULL);
  gst_pad_set_caps (rdtdepay->srcpad, srccaps);
  gst_caps_unref (srccaps);

  if (rdtdepay->header)
    gst_buffer_unref (rdtdepay->header);
  rdtdepay->header = gst_buffer_ref (header);

  return TRUE;

  /* ERRORS */
no_header:
  {
    GST_ERROR_OBJECT (rdtdepay, "no header found in caps, no 'config' field");
    return FALSE;
  }
}
int
main (int argc, char *argv[])
{
  GstBus *bus;
  GstElement *filter, *convert, *sink;
  GstCaps *caps;
  gboolean res;
  SprinkleState *state;

  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, TRUE);

  pipeline = gst_pipeline_new ("pipeline");

  /* add the fixed part to the pipeline. Remember that we need a capsfilter
   * after adder so that multiple sources are not racing to negotiate
   * a format */
  adder = gst_element_factory_make ("adder", "adder");
  filter = gst_element_factory_make ("capsfilter", "filter");
  convert = gst_element_factory_make ("audioconvert", "convert");
  sink = gst_element_factory_make ("autoaudiosink", "sink");

  caps = gst_caps_new_simple ("audio/x-raw",
      "format", G_TYPE_STRING, "S16LE",
      "channels", G_TYPE_INT, 2, "rate", G_TYPE_INT, 44100, NULL);
  g_object_set (filter, "caps", caps, NULL);
  gst_caps_unref (caps);

  gst_bin_add_many (GST_BIN (pipeline), adder, filter, convert, sink, NULL);

  res = gst_element_link_many (adder, filter, convert, sink, NULL);
  g_assert (res);

  /* setup message handling */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
  g_signal_connect (bus, "message::error", (GCallback) message_received,
      pipeline);
  g_signal_connect (bus, "message::warning", (GCallback) message_received,
      pipeline);
  g_signal_connect (bus, "message::eos", (GCallback) eos_message_received,
      pipeline);

  /* we set the pipeline to PLAYING, the pipeline will not yet preroll because
   * there is no source providing data for it yet */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* and add the function that modifies the pipeline every 100ms */
  state = create_state ();
  g_timeout_add (100, (GSourceFunc) do_sprinkle, state);

  /* go to main loop */
  g_main_loop_run (loop);

  gst_element_set_state (pipeline, GST_STATE_NULL);

  free_state (state);
  gst_object_unref (bus);
  gst_object_unref (pipeline);

  return 0;
}
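
create_state() and do_sprinkle() are not shown in this example. A minimal sketch, assuming GStreamer 1.0 and the file-scope pipeline and adder used above, of how such a periodic callback can attach a fresh source to the running adder (helper name hypothetical):

static void
sprinkle_add_source (gdouble freq)
{
  GstElement *src;
  GstPad *srcpad, *sinkpad;

  src = gst_element_factory_make ("audiotestsrc", NULL);
  g_object_set (src, "freq", freq, NULL);

  gst_bin_add (GST_BIN (pipeline), src);

  /* adder exposes request sink pads; grab one and link the source to it */
  srcpad = gst_element_get_static_pad (src, "src");
  sinkpad = gst_element_get_request_pad (adder, "sink_%u");
  gst_pad_link (srcpad, sinkpad);
  gst_object_unref (srcpad);
  gst_object_unref (sinkpad);

  /* bring the new source up to the pipeline's state so it starts playing */
  gst_element_sync_state_with_parent (src);
}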
Example #15
static void
gst_gnome_vfs_src_received_headers_callback (gconstpointer in,
    gsize in_size, gpointer out, gsize out_size, gpointer callback_data)
{
  GList *i;
  gint icy_metaint;
  GstGnomeVFSSrc *src = GST_GNOME_VFS_SRC (callback_data);
  GnomeVFSModuleCallbackReceivedHeadersIn *in_args =
      (GnomeVFSModuleCallbackReceivedHeadersIn *) in;

  /* This is only used for internet radio stuff right now */
  if (!src->iradio_mode)
    return;

  GST_DEBUG_OBJECT (src, "receiving internet radio metadata\n");

  /* FIXME: Could we use "Accept-Ranges: bytes"
   * http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.5
   * to enable pull-mode?
   */

  for (i = in_args->headers; i; i = i->next) {
    char *data = (char *) i->data;
    char *key = data;
    char *value = strchr (data, ':');

    if (!value)
      continue;

    value++;
    g_strstrip (value);
    if (!strlen (value))
      continue;

    GST_LOG_OBJECT (src, "data %s", data);

    /* Icecast stuff */
    if (strncmp (data, "icy-metaint:", 12) == 0) {      /* ugh */
      if (sscanf (data + 12, "%d", &icy_metaint) == 1) {
        if (icy_metaint > 0) {
          GstCaps *icy_caps;

          icy_caps = gst_caps_new_simple ("application/x-icy",
              "metadata-interval", G_TYPE_INT, icy_metaint, NULL);
          gst_pad_set_caps (GST_BASE_SRC_PAD (src), icy_caps);
          gst_caps_unref (icy_caps);
        }
      }
      continue;
    }

    if (!strncmp (data, "icy-", 4))
      key = data + 4;
    else
      continue;

    GST_DEBUG_OBJECT (src, "key: %s", key);
    if (!strncmp (key, "name", 4)) {
      g_free (src->iradio_name);
      src->iradio_name = gst_gnome_vfs_src_unicodify (value);
      if (src->iradio_name)
        g_object_notify (G_OBJECT (src), "iradio-name");
    } else if (!strncmp (key, "genre", 5)) {
      g_free (src->iradio_genre);
      src->iradio_genre = gst_gnome_vfs_src_unicodify (value);
      if (src->iradio_genre)
        g_object_notify (G_OBJECT (src), "iradio-genre");
    } else if (!strncmp (key, "url", 3)) {
      g_free (src->iradio_url);
      src->iradio_url = gst_gnome_vfs_src_unicodify (value);
      if (src->iradio_url)
        g_object_notify (G_OBJECT (src), "iradio-url");
    }
  }
}
Example #16
AudioDecoderGst::AudioDecoderGst(const AudioInfo& info)
{
    // init GStreamer. TODO: what about doing this in MediaHandlerGst ctor?
    gst_init (NULL, NULL);

    GstCaps* srccaps=0;

    if (info.type == CODEC_TYPE_FLASH && info.codec == AUDIO_CODEC_MP3)
    {
        srccaps = gst_caps_new_simple ("audio/mpeg",
		"mpegversion", G_TYPE_INT, 1,
		"layer", G_TYPE_INT, 3,
		"rate", G_TYPE_INT, info.sampleRate,
		"channels", G_TYPE_INT, info.stereo ? 2 : 1, NULL);
        setup(srccaps);
        return;
    }
    
    if (info.type == CODEC_TYPE_FLASH && info.codec == AUDIO_CODEC_NELLYMOSER)
    {
        srccaps = gst_caps_new_simple ("audio/x-nellymoser",
		"rate", G_TYPE_INT, info.sampleRate,
		"channels", G_TYPE_INT, info.stereo ? 2 : 1, NULL);
        setup(srccaps);
        return;
    }

    if (info.type == CODEC_TYPE_FLASH && info.codec == AUDIO_CODEC_AAC)
    {
        srccaps = gst_caps_new_simple ("audio/mpeg",
            "mpegversion", G_TYPE_INT, 4,
            "rate", G_TYPE_INT, 44100,
            "channels", G_TYPE_INT, 2, 
            NULL);

        ExtraAudioInfoFlv* extra = dynamic_cast<ExtraAudioInfoFlv*>(info.extra.get());
        if (extra) {
            GstBuffer* buf = gst_buffer_new_and_alloc(extra->size);
            memcpy(GST_BUFFER_DATA(buf), extra->data.get(), extra->size);
            gst_caps_set_simple (srccaps, "codec_data", GST_TYPE_BUFFER, buf, NULL);

        } else {
            log_error(_("Creating AAC decoder without extra data. This will probably fail!"));
        }

        setup(srccaps);
        return;
    }


    if (info.type == CODEC_TYPE_FLASH) {
		boost::format err = boost::format(
                _("AudioDecoderGst: cannot handle codec %d (%s)")) %
                info.codec %
                (audioCodecType)info.codec;
        throw MediaException(err.str());
    }

    ExtraInfoGst* extraaudioinfo = dynamic_cast<ExtraInfoGst*>(info.extra.get());

    if (!extraaudioinfo) {
		boost::format err = boost::format(
                _("AudioDecoderGst: cannot handle codec %d "
                  "(no ExtraInfoGst attached)")) %
                info.codec;
        throw MediaException(err.str());
    }

    setup(extraaudioinfo->caps);
}
GdkPixbuf *
xplayer_gst_playbin_get_frame (GstElement *play)
{
  GstStructure *s;
  GstSample *sample = NULL;
  GdkPixbuf *pixbuf = NULL;
  GstCaps *to_caps, *sample_caps;
  gint outwidth = 0;
  gint outheight = 0;
  GstMemory *memory;
  GstMapInfo info;
  GdkPixbufRotation rotation = GDK_PIXBUF_ROTATE_NONE;

  g_return_val_if_fail (play != NULL, NULL);
  g_return_val_if_fail (GST_IS_ELEMENT (play), NULL);

  /* our desired output format (RGB24) */
  to_caps = gst_caps_new_simple ("video/x-raw",
      "format", G_TYPE_STRING, "RGB",
      /* Note: we don't ask for a specific width/height here, so that
       * videoscale can adjust dimensions from a non-1/1 pixel aspect
       * ratio to a 1/1 pixel-aspect-ratio. We also don't ask for a
       * specific framerate, because the input framerate won't
       * necessarily match the output framerate if there's a deinterlacer
       * in the pipeline. */
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
      NULL);

  /* get frame */
  g_signal_emit_by_name (play, "convert-sample", to_caps, &sample);
  gst_caps_unref (to_caps);

  if (!sample) {
    GST_DEBUG ("Could not take screenshot: %s",
        "failed to retrieve or convert video frame");
    g_warning ("Could not take screenshot: %s",
        "failed to retrieve or convert video frame");
    return NULL;
  }

  sample_caps = gst_sample_get_caps (sample);
  if (!sample_caps) {
    GST_DEBUG ("Could not take screenshot: %s", "no caps on output buffer");
    g_warning ("Could not take screenshot: %s", "no caps on output buffer");
    return NULL;
  }

  GST_DEBUG ("frame caps: %" GST_PTR_FORMAT, sample_caps);

  s = gst_caps_get_structure (sample_caps, 0);
  gst_structure_get_int (s, "width", &outwidth);
  gst_structure_get_int (s, "height", &outheight);
  if (outwidth <= 0 || outheight <= 0)
    goto done;

  memory = gst_buffer_get_memory (gst_sample_get_buffer (sample), 0);
  gst_memory_map (memory, &info, GST_MAP_READ);

  /* create pixbuf from that - use our own destroy function */
  pixbuf = gdk_pixbuf_new_from_data (info.data,
      GDK_COLORSPACE_RGB, FALSE, 8, outwidth, outheight,
      GST_ROUND_UP_4 (outwidth * 3), destroy_pixbuf, sample);

  gst_memory_unmap (memory, &info);

done:
  if (!pixbuf) {
    GST_DEBUG ("Could not take screenshot: %s", "could not create pixbuf");
    g_warning ("Could not take screenshot: %s", "could not create pixbuf");
    gst_sample_unref (sample);
    return NULL;
  }

  /* Did we check whether we need to rotate the video? */
  if (g_object_get_data (G_OBJECT (play), "orientation-checked") == NULL) {
    GstTagList *tags = NULL;

    g_signal_emit_by_name (G_OBJECT (play), "get-video-tags", 0, &tags);
    if (tags) {
      char *orientation_str;
      gboolean ret;

      ret = gst_tag_list_get_string_index (tags, GST_TAG_IMAGE_ORIENTATION, 0, &orientation_str);
      if (!ret || !orientation_str)
        rotation = GDK_PIXBUF_ROTATE_NONE;
      else if (g_str_equal (orientation_str, "rotate-90"))
        rotation = GDK_PIXBUF_ROTATE_CLOCKWISE;
      else if (g_str_equal (orientation_str, "rotate-180"))
        rotation = GDK_PIXBUF_ROTATE_UPSIDEDOWN;
      else if (g_str_equal (orientation_str, "rotate-270"))
        rotation = GDK_PIXBUF_ROTATE_COUNTERCLOCKWISE;

      gst_tag_list_unref (tags);
    }

    g_object_set_data (G_OBJECT (play), "orientation-checked", GINT_TO_POINTER(1));
    g_object_set_data (G_OBJECT (play), "orientation", GINT_TO_POINTER(rotation));
  }

  rotation = GPOINTER_TO_INT (g_object_get_data (G_OBJECT (play), "orientation"));
  if (rotation != GDK_PIXBUF_ROTATE_NONE) {
    GdkPixbuf *rotated;

    rotated = gdk_pixbuf_rotate_simple (pixbuf, rotation);
    if (rotated) {
      g_object_unref (pixbuf);
      pixbuf = rotated;
    }
  }

  return pixbuf;
}
Example #18
bool Player::prepare()
{
    //Init Gst
    //
    QString caps_value = "audio/x-raw-int";

      // On mac we bundle the gstreamer plugins with knowthelist
    #if defined(Q_OS_DARWIN)
      QString scanner_path;
      QString plugin_path;
      QString registry_filename;

      caps_value = "audio/x-raw-float";
      QDir pd(QCoreApplication::applicationDirPath() + "/../plugins");
      scanner_path = QCoreApplication::applicationDirPath() + "/../plugins/gst-plugin-scanner";
      plugin_path = QCoreApplication::applicationDirPath() + "/../plugins/gstreamer";
      registry_filename = QDesktopServices::storageLocation(QDesktopServices::DataLocation) +
              QString("/gst-registry-%1-bin").arg(QCoreApplication::applicationVersion());

      if ( pd.exists())
        setenv("GST_PLUGIN_SCANNER", scanner_path.toLocal8Bit().constData(), 1);

      if ( pd.exists()) {
        setenv("GST_PLUGIN_PATH", plugin_path.toLocal8Bit().constData(), 1);
        // Never load plugins from anywhere else.
        setenv("GST_PLUGIN_SYSTEM_PATH", plugin_path.toLocal8Bit().constData(), 1);
      }

      if (!registry_filename.isEmpty()) {
        setenv("GST_REGISTRY", registry_filename.toLocal8Bit().constData(), 1);
      }
    #endif

      gst_init (0, 0);

    //prepare

        GstElement *dec, *conv,*resample,*sink, *gain, *audio, *vol, *level, *equalizer;
        GstElement *levelout;
        GstPad *audiopad;
        GstCaps *caps;
        pipeline = gst_pipeline_new ("pipeline");
        bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
        caps = gst_caps_new_simple (caps_value.toLatin1().data(),
                                    "channels", G_TYPE_INT, 2, NULL);


        dec = gst_element_factory_make ("decodebin2", "decoder");
        g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad), this);
        gst_bin_add (GST_BIN (pipeline), dec);

        audio = gst_bin_new ("audiobin");
        conv = gst_element_factory_make ("audioconvert", "aconv");
        resample = gst_element_factory_make ("audioresample", "resample");
        audiopad = gst_element_get_static_pad (conv, "sink");
        gain = gst_element_factory_make ("audioamplify", "gain");
        level = gst_element_factory_make ("level", "levelintern");
        vol = gst_element_factory_make ("volume", "volume");
        levelout = gst_element_factory_make ("level", "levelout");
        equalizer = gst_element_factory_make ("equalizer-3bands", "equalizer");
        sink = gst_element_factory_make ("autoaudiosink", "sink");

        g_object_set (level, "message", TRUE, NULL);
        g_object_set (levelout, "message", TRUE, NULL);
        g_object_set (level, "peak-ttl", 300000000000, NULL);

        gst_bin_add_many (GST_BIN (audio), conv, resample, level, gain, equalizer, levelout, vol, sink, NULL);
        gst_element_link (conv,resample);

        gst_element_link_filtered (resample, level, caps);
        gst_element_link (level, gain);
        gst_element_link (gain, equalizer);
        gst_element_link (equalizer, vol);
        gst_element_link_filtered (vol, levelout, caps);
        gst_element_link (levelout,sink);

        gst_element_add_pad (audio, gst_ghost_pad_new ("sink", audiopad));
        gst_bin_add (GST_BIN (pipeline), audio);


        GstElement *l_src;
        l_src = gst_element_factory_make ("filesrc", "localsrc");
        gst_bin_add_many (GST_BIN (pipeline), l_src, NULL);
        gst_element_set_state (l_src, GST_STATE_NULL);
        gst_element_link ( l_src,dec);

        gst_bus_set_sync_handler (bus, bus_cb, this);

        gst_object_unref (audiopad);

        return pipeline != NULL;
}
/**
 * gst_basertppayload_set_outcaps:
 * @payload: a #GstBaseRTPPayload
 * @fieldname: the first field name or %NULL
 * @...: field values
 *
 * Configure the output caps with the optional parameters.
 *
 * Variable arguments should be in the form field name, field type
 * (as a GType), value(s).  The last variable argument should be NULL.
 *
 * Returns: %TRUE if the caps could be set.
 */
gboolean
gst_basertppayload_set_outcaps (GstBaseRTPPayload * payload, gchar * fieldname,
    ...)
{
  GstCaps *srccaps, *peercaps;
  gboolean res;

  /* fill in the defaults, these properties cannot be negotiated. */
  srccaps = gst_caps_new_simple ("application/x-rtp",
      "media", G_TYPE_STRING, payload->media,
      "clock-rate", G_TYPE_INT, payload->clock_rate,
      "encoding-name", G_TYPE_STRING, payload->encoding_name, NULL);

  GST_DEBUG_OBJECT (payload, "defaults: %" GST_PTR_FORMAT, srccaps);

  if (fieldname) {
    va_list varargs;

    /* override with custom properties */
    va_start (varargs, fieldname);
    gst_caps_set_simple_valist (srccaps, fieldname, varargs);
    va_end (varargs);

    GST_DEBUG_OBJECT (payload, "custom added: %" GST_PTR_FORMAT, srccaps);
  }

  /* the peer caps can override some of the defaults */
  peercaps = gst_pad_peer_get_caps (payload->srcpad);
  if (peercaps == NULL) {
    /* no peer caps, just add the other properties */
    gst_caps_set_simple (srccaps,
        "payload", G_TYPE_INT, GST_BASE_RTP_PAYLOAD_PT (payload),
        "ssrc", G_TYPE_UINT, payload->current_ssrc,
        "clock-base", G_TYPE_UINT, payload->ts_base,
        "seqnum-base", G_TYPE_UINT, payload->seqnum_base, NULL);

    GST_DEBUG_OBJECT (payload, "no peer caps: %" GST_PTR_FORMAT, srccaps);
  } else {
    GstCaps *temp;
    GstStructure *s, *d;
    const GValue *value;
    gint pt;

    /* peer provides caps we can use to fixate, intersect. This always returns a
     * writable caps. */
    temp = gst_caps_intersect (srccaps, peercaps);
    gst_caps_unref (srccaps);
    gst_caps_unref (peercaps);

    /* now fixate, start by taking the first caps */
    gst_caps_truncate (temp);

    /* get first structure */
    s = gst_caps_get_structure (temp, 0);

    if (gst_structure_get_int (s, "payload", &pt)) {
      /* use peer pt */
      GST_BASE_RTP_PAYLOAD_PT (payload) = pt;
      GST_LOG_OBJECT (payload, "using peer pt %d", pt);
    } else {
      if (gst_structure_has_field (s, "payload")) {
        /* can only fixate if there is a field */
        gst_structure_fixate_field_nearest_int (s, "payload",
            GST_BASE_RTP_PAYLOAD_PT (payload));
        gst_structure_get_int (s, "payload", &pt);
        GST_LOG_OBJECT (payload, "using peer pt %d", pt);
      } else {
        /* no pt field, use the internal pt */
        pt = GST_BASE_RTP_PAYLOAD_PT (payload);
        gst_structure_set (s, "payload", G_TYPE_INT, pt, NULL);
        GST_LOG_OBJECT (payload, "using internal pt %d", pt);
      }
    }

    if (gst_structure_has_field_typed (s, "ssrc", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "ssrc");
      payload->current_ssrc = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer ssrc %08x", payload->current_ssrc);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "ssrc", G_TYPE_UINT, payload->current_ssrc, NULL);
      GST_LOG_OBJECT (payload, "using internal ssrc %08x",
          payload->current_ssrc);
    }

    if (gst_structure_has_field_typed (s, "clock-base", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "clock-base");
      payload->ts_base = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer clock-base %u", payload->ts_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "clock-base", G_TYPE_UINT, payload->ts_base, NULL);
      GST_LOG_OBJECT (payload, "using internal clock-base %u",
          payload->ts_base);
    }
    if (gst_structure_has_field_typed (s, "seqnum-base", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "seqnum-base");
      payload->seqnum_base = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer seqnum-base %u",
          payload->seqnum_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "seqnum-base", G_TYPE_UINT, payload->seqnum_base,
          NULL);
      GST_LOG_OBJECT (payload, "using internal seqnum-base %u",
          payload->seqnum_base);
    }

    /* make the target caps by copying over all the fixed caps, removing the
     * unfixed caps. */
    srccaps = gst_caps_new_simple (gst_structure_get_name (s), NULL);
    d = gst_caps_get_structure (srccaps, 0);

    gst_structure_foreach (s, (GstStructureForeachFunc) copy_fixed, d);

    gst_caps_unref (temp);

    GST_DEBUG_OBJECT (payload, "with peer caps: %" GST_PTR_FORMAT, srccaps);
  }

  res = gst_pad_set_caps (GST_BASE_RTP_PAYLOAD_SRCPAD (payload), srccaps);
  gst_caps_unref (srccaps);

  return res;
}
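
The varargs tail above is what lets a payloader subclass append encoding-specific fields before negotiation. A minimal caller sketch, assuming the function above is the GStreamer 0.10 gst_basertppayload_set_outcaps() helper and that payload is an already-configured payloader; the field names here are illustrative only:

  /* hypothetical caller: negotiate the defaults plus two extra caps fields */
  if (!gst_basertppayload_set_outcaps (payload,
          "octet-align", G_TYPE_STRING, "1",
          "encoding-params", G_TYPE_STRING, "1", NULL))
    GST_WARNING_OBJECT (payload, "could not negotiate output caps");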
static GstFlowReturn
gst_ac3_parse_handle_frame (GstBaseParse * parse,
    GstBaseParseFrame * frame, gint * skipsize)
{
  GstAc3Parse *ac3parse = GST_AC3_PARSE (parse);
  GstBuffer *buf = frame->buffer;
  GstByteReader reader;
  gint off;
  gboolean lost_sync, draining, eac, more = FALSE;
  guint frmsiz, blocks, sid;
  guint rate, chans;
  gboolean update_rate = FALSE;
  gint framesize = 0;
  gint have_blocks = 0;
  GstMapInfo map;
  gboolean ret = FALSE;
  GstFlowReturn res = GST_FLOW_OK;

  gst_buffer_map (buf, &map, GST_MAP_READ);

  if (G_UNLIKELY (map.size < 6)) {
    *skipsize = 1;
    goto cleanup;
  }

  gst_byte_reader_init (&reader, map.data, map.size);
  off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffff0000, 0x0b770000,
      0, map.size);

  GST_LOG_OBJECT (parse, "possible sync at buffer offset %d", off);

  /* didn't find anything that looks like a sync word, skip */
  if (off < 0) {
    *skipsize = map.size - 3;
    goto cleanup;
  }

  /* possible frame header, but not at offset 0? skip bytes before sync */
  if (off > 0) {
    *skipsize = off;
    goto cleanup;
  }

  /* make sure the values in the frame header look sane */
  if (!gst_ac3_parse_frame_header (ac3parse, buf, 0, &frmsiz, &rate, &chans,
          &blocks, &sid, &eac)) {
    *skipsize = off + 2;
    goto cleanup;
  }

  GST_LOG_OBJECT (parse, "size: %u, blocks: %u, rate: %u, chans: %u", frmsiz,
      blocks, rate, chans);

  framesize = frmsiz;

  if (G_UNLIKELY (g_atomic_int_get (&ac3parse->align) ==
          GST_AC3_PARSE_ALIGN_NONE))
    gst_ac3_parse_set_alignment (ac3parse, eac);

  GST_LOG_OBJECT (parse, "got frame");

  lost_sync = GST_BASE_PARSE_LOST_SYNC (parse);
  draining = GST_BASE_PARSE_DRAINING (parse);

  if (g_atomic_int_get (&ac3parse->align) == GST_AC3_PARSE_ALIGN_IEC61937) {
    /* We need 6 audio blocks from each substream, so we keep going forwards
     * till we have it */

    g_assert (blocks > 0);
    GST_LOG_OBJECT (ac3parse, "Need %d frames before pushing", 6 / blocks);

    if (sid != 0) {
      /* We need the first substream to be the one with id 0 */
      GST_LOG_OBJECT (ac3parse, "Skipping till we find sid 0");
      *skipsize = off + 2;
      goto cleanup;
    }

    framesize = 0;

    /* Loop till we have 6 blocks per substream */
    for (have_blocks = 0; !more && have_blocks < 6; have_blocks += blocks) {
      /* Loop till we get one frame from each substream */
      do {
        framesize += frmsiz;

        if (!gst_byte_reader_skip (&reader, frmsiz)
            || map.size < (framesize + 6)) {
          more = TRUE;
          break;
        }

        if (!gst_ac3_parse_frame_header (ac3parse, buf, framesize, &frmsiz,
                NULL, NULL, NULL, &sid, &eac)) {
          *skipsize = off + 2;
          goto cleanup;
        }
      } while (sid);
    }

    /* We're now at the next frame, so no need to skip if resyncing */
    frmsiz = 0;
  }

  if (lost_sync && !draining) {
    guint16 word = 0;

    GST_DEBUG_OBJECT (ac3parse, "resyncing; checking next frame syncword");

    if (more || !gst_byte_reader_skip (&reader, frmsiz) ||
        !gst_byte_reader_get_uint16_be (&reader, &word)) {
      GST_DEBUG_OBJECT (ac3parse, "... but not sufficient data");
      gst_base_parse_set_min_frame_size (parse, framesize + 6);
      *skipsize = 0;
      goto cleanup;
    } else {
      if (word != 0x0b77) {
        GST_DEBUG_OBJECT (ac3parse, "0x%x not OK", word);
        *skipsize = off + 2;
        goto cleanup;
      } else {
        /* ok, got sync now, let's assume constant frame size */
        gst_base_parse_set_min_frame_size (parse, framesize);
      }
    }
  }

  /* expect to have found a frame here */
  g_assert (framesize);
  ret = TRUE;

  /* arrange for metadata setup */
  if (G_UNLIKELY (sid)) {
    /* dependent frame, no need to (ac)count for or consider further */
    GST_LOG_OBJECT (parse, "sid: %d", sid);
    frame->flags |= GST_BASE_PARSE_FRAME_FLAG_NO_FRAME;
    /* TODO maybe also mark as DELTA_UNIT,
     * if that does not surprise baseparse elsewhere */
    /* occupies same time space as previous base frame */
    if (G_LIKELY (GST_BUFFER_TIMESTAMP (buf) >= GST_BUFFER_DURATION (buf)))
      GST_BUFFER_TIMESTAMP (buf) -= GST_BUFFER_DURATION (buf);
    /* only shortcut if we already arranged for caps */
    if (G_LIKELY (ac3parse->sample_rate > 0))
      goto cleanup;
  }

  if (G_UNLIKELY (ac3parse->sample_rate != rate || ac3parse->channels != chans
          || ac3parse->eac != eac)) {
    GstCaps *caps = gst_caps_new_simple (eac ? "audio/x-eac3" : "audio/x-ac3",
        "framed", G_TYPE_BOOLEAN, TRUE, "rate", G_TYPE_INT, rate,
        "channels", G_TYPE_INT, chans, NULL);
    gst_caps_set_simple (caps, "alignment", G_TYPE_STRING,
        g_atomic_int_get (&ac3parse->align) == GST_AC3_PARSE_ALIGN_IEC61937 ?
        "iec61937" : "frame", NULL);
    gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (parse), caps);
    gst_caps_unref (caps);

    ac3parse->sample_rate = rate;
    ac3parse->channels = chans;
    ac3parse->eac = eac;

    update_rate = TRUE;
  }

  if (G_UNLIKELY (ac3parse->blocks != blocks)) {
    ac3parse->blocks = blocks;

    update_rate = TRUE;
  }

  if (G_UNLIKELY (update_rate))
    gst_base_parse_set_frame_rate (parse, rate, 256 * blocks, 2, 2);

cleanup:
  gst_buffer_unmap (buf, &map);

  if (ret && framesize <= map.size) {
    res = gst_base_parse_finish_frame (parse, frame, framesize);
  }

  return res;
}
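
For the IEC 61937 path above, the accumulation arithmetic is worth spelling out. A one-line sketch derived from the loop above, where blocks per syncframe is 1, 2, 3 or 6:

  /* illustrative only: syncframes needed per substream to collect the
   * 6 audio blocks one IEC 61937 repetition period carries */
  guint frames_needed = 6 / blocks;   /* e.g. blocks == 2 -> 3 frames */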
Example #21
int main(int argc, char** argv) {
	char *config = getenv("GSCAM_CONFIG");
	if (config == NULL) {
		std::cout << "Problem getting GSCAM_CONFIG variable." << std::endl;
		exit(-1);
	}

	gst_init(NULL, NULL);
	std::cout << "Gstreamer Version: " << gst_version_string() << std::endl;

	GError *error = NULL; // must be initialized to NULL so gst_parse_launch() can fill it
	GstElement *pipeline = gst_parse_launch(config,&error);
	if (pipeline == NULL) {
		std::cout << error->message << std::endl;
		exit(-1);
	}
	GstElement * sink = gst_element_factory_make("appsink",NULL);
	GstCaps * caps = gst_caps_new_simple("video/x-raw-rgb", NULL);
	gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
	gst_caps_unref(caps);

	gst_base_sink_set_sync(GST_BASE_SINK(sink), TRUE);

	if(GST_IS_PIPELINE(pipeline)) {
	    GstPad *outpad = gst_bin_find_unlinked_pad(GST_BIN(pipeline), GST_PAD_SRC);
	    g_assert(outpad);
	    GstElement *outelement = gst_pad_get_parent_element(outpad);
	    g_assert(outelement);
	    gst_object_unref(outpad);


	    if(!gst_bin_add(GST_BIN(pipeline), sink)) {
		fprintf(stderr, "gst_bin_add() failed\n"); // TODO: do some unref
		gst_object_unref(outelement);
		gst_object_unref(pipeline);
		return -1;
	    }

	    if(!gst_element_link(outelement, sink)) {
		fprintf(stderr, "GStreamer: cannot link outelement(\"%s\") -> sink\n", gst_element_get_name(outelement));
		gst_object_unref(outelement);
		gst_object_unref(pipeline);
		return -1;
	    }

	    gst_object_unref(outelement);
	} else {
	    GstElement* launchpipe = pipeline;
	    pipeline = gst_pipeline_new(NULL);
	    g_assert(pipeline);

	    gst_object_unparent(GST_OBJECT(launchpipe));

	    gst_bin_add_many(GST_BIN(pipeline), launchpipe, sink, NULL);

	    if(!gst_element_link(launchpipe, sink)) {
		fprintf(stderr, "GStreamer: cannot link launchpipe -> sink\n");
		gst_object_unref(pipeline);
		return -1;
	    }
	}

	gst_element_set_state(pipeline, GST_STATE_PAUSED);

	if (gst_element_get_state(pipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
		std::cout << "Failed to PAUSE." << std::endl;
		exit(-1);
	} else {
		std::cout << "stream is PAUSED." << std::endl;
	}

	// We could probably do something with the camera name, check
	// errors or something, but at the moment, we don't care.
	std::string camera_name;
	if (camera_calibration_parsers::readCalibrationIni("../camera_parameters.txt", camera_name, camera_info)) {
	  ROS_INFO("Successfully read camera calibration.  Rerun camera calibrator if it is incorrect.");
	}
	else {
	  ROS_ERROR("No camera_parameters.txt file found.  Use default file if no other is available.");
	}

	ros::init(argc, argv, "gscam_publisher");
	ros::NodeHandle nh;

	int preroll;
	nh.param("brown/gscam/preroll", preroll, 0);
	if (preroll) {
		//The PAUSE, PLAY, PAUSE, PLAY cycle is to ensure proper pre-roll
		//I am told this is needed and am erring on the side of caution.
		gst_element_set_state(pipeline, GST_STATE_PLAYING);

		if (gst_element_get_state(pipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
			std::cout << "Failed to PLAY." << std::endl;
			exit(-1);
		} else {
			std::cout << "stream is PLAYING." << std::endl;
		}

		gst_element_set_state(pipeline, GST_STATE_PAUSED);

		if (gst_element_get_state(pipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
			std::cout << "Failed to PAUSE." << std::endl;
			exit(-1);
		} else {
			std::cout << "stream is PAUSED." << std::endl;
		}
	}

	image_transport::ImageTransport it(nh);
//	image_transport::CameraPublisher pub = it.advertiseCamera("gscam/image_raw", 1);

//------------------------------------
// Added by jschoi, 2012-08-13
	char topic_name[32];
	if(argc==1)
		snprintf(topic_name, sizeof(topic_name), "gscam/image_raw");
	else
		snprintf(topic_name, sizeof(topic_name), "%s", argv[1]);	// take the topic name from the first argument, truncating safely
//------------------------------------
	image_transport::CameraPublisher pub = it.advertiseCamera(topic_name, 1);

	ros::ServiceServer set_camera_info = nh.advertiseService("gscam/set_camera_info", setCameraInfo);

	std::cout << "Processing..." << std::endl;

	//processVideo
	rosPad = false;
	gstreamerPad = true;
	gst_element_set_state(pipeline, GST_STATE_PLAYING);
	while(nh.ok()) {
                // This should block until a new frame is available; this way we'll
                // run at the actual capture framerate of the device.
		GstBuffer* buf = gst_app_sink_pull_buffer(GST_APP_SINK(sink));
		if (!buf) break;

		GstPad* pad = gst_element_get_static_pad(sink, "sink");
		GstCaps *caps = gst_pad_get_negotiated_caps(pad);
		GstStructure *structure = gst_caps_get_structure(caps,0);
		gst_structure_get_int(structure,"width",&width);
		gst_structure_get_int(structure,"height",&height);
		gst_caps_unref(caps);		// drop the refs we took; this loop runs per frame
		gst_object_unref(pad);

		sensor_msgs::Image msg;
		msg.width = width; 
		msg.height = height;
		msg.encoding = "rgb8";
		msg.is_bigendian = false;
		msg.step = width*3;
		msg.data.resize(width*height*3);
		std::copy(buf->data, buf->data+(width*height*3), msg.data.begin());

		pub.publish(msg, camera_info);

                gst_buffer_unref(buf);

		ros::spinOnce();

	}

	//close out
	std::cout << "\nquitting..." << std::endl;
	gst_element_set_state(pipeline, GST_STATE_NULL);
	gst_object_unref(pipeline);

	return 0;
}
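
Since the pipeline is built by gst_parse_launch() from the GSCAM_CONFIG environment variable, the description has to end in something the RGB appsink can link to. A plausible (GStreamer 0.10) value, shown only as an illustration:

	// illustrative only: a GSCAM_CONFIG value this program could consume
	//   export GSCAM_CONFIG="v4l2src device=/dev/video0 ! ffmpegcolorspace"
	// gst_parse_launch() builds that chain; its unlinked src pad is then found
	// by gst_bin_find_unlinked_pad() above and wired to the appsink.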
Example #22
static gboolean
gst_divxenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstDivxEnc *divxenc;
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  gint w, h;
  const GValue *fps;
  guint32 fourcc;
  guint32 divx_cs;
  gint bitcnt = 0;
  gboolean ret = FALSE;

  divxenc = GST_DIVXENC (gst_pad_get_parent (pad));

  /* if there's something old around, remove it */
  gst_divxenc_unset (divxenc);

  gst_structure_get_int (structure, "width", &w);
  gst_structure_get_int (structure, "height", &h);
  gst_structure_get_fourcc (structure, "format", &fourcc);

  fps = gst_structure_get_value (structure, "framerate");
  if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
    divxenc->fps_n = gst_value_get_fraction_numerator (fps);
    divxenc->fps_d = gst_value_get_fraction_denominator (fps);
  } else {
    divxenc->fps_n = -1;
  }

  switch (fourcc) {
    case GST_MAKE_FOURCC ('I', '4', '2', '0'):
      divx_cs = GST_MAKE_FOURCC ('I', '4', '2', '0');
      break;
    case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
      divx_cs = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
      break;
    case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
      divx_cs = GST_MAKE_FOURCC ('Y', 'V', '1', '2');
      break;
    case GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U'):
      divx_cs = GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U');
      break;
    case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
      divx_cs = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
      break;
    default:
      ret = FALSE;
      goto done;
  }

  divxenc->csp = divx_cs;
  divxenc->bitcnt = bitcnt;
  divxenc->width = w;
  divxenc->height = h;

  /* try it */
  if (gst_divxenc_setup (divxenc)) {
    GstCaps *new_caps = NULL;

    new_caps = gst_caps_new_simple ("video/x-divx",
        "divxversion", G_TYPE_INT, 5,
        "width", G_TYPE_INT, w,
        "height", G_TYPE_INT, h,
        "framerate", GST_TYPE_FRACTION, divxenc->fps_n, divxenc->fps_d, NULL);

    if (new_caps) {

      if (!gst_pad_set_caps (divxenc->srcpad, new_caps)) {
        /* don't leak the caps on the failure path */
        gst_caps_unref (new_caps);
        gst_divxenc_unset (divxenc);
        ret = FALSE;
        goto done;
      }
      gst_caps_unref (new_caps);
      ret = TRUE;
      goto done;

    }

  }

  /* if we got here - it's not good */

  ret = FALSE;

done:
  gst_object_unref (divxenc);
  return ret;
}
int
main (int argc, char *argv[])
{
  GstElement *bin;
  GstElement *decodebin, *decconvert;
  GstElement *capsfilter, *equalizer, *spectrum, *sinkconvert, *sink;
  GstCaps *caps;
  GstBus *bus;
  GtkWidget *appwindow, *vbox, *hbox, *scale;
  int i, num_bands = NBANDS;

  GOptionEntry options[] = {
    {"bands", 'b', 0, G_OPTION_ARG_INT, &num_bands,
        "Number of bands", NULL},
    {NULL}
  };
  GOptionContext *ctx;
  GError *err = NULL;

  ctx = g_option_context_new ("- demo of audio equalizer");
  g_option_context_add_main_entries (ctx, options, NULL);
  g_option_context_add_group (ctx, gst_init_get_option_group ());
  g_option_context_add_group (ctx, gtk_get_option_group (TRUE));

  if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
    g_print ("Error initializing: %s\n", err->message);
    g_option_context_free (ctx);
    g_clear_error (&err);
    exit (1);
  }
  g_option_context_free (ctx);

  if (argc < 2) {
    g_print ("Usage: %s <uri to play>\n", argv[0]);
    g_print ("    For optional arguments: --help\n");
    exit (-1);
  }

  gst_init (&argc, &argv);
  gtk_init (&argc, &argv);

  bin = gst_pipeline_new ("bin");

  /* Uri decoding */
  decodebin = gst_element_factory_make ("uridecodebin", "decoder");
  g_object_set (G_OBJECT (decodebin), "uri", argv[1], NULL);

  /* Force float32 samples */
  decconvert = gst_element_factory_make ("audioconvert", "decconvert");
  capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
  caps =
      gst_caps_new_simple ("audio/x-raw", "format", G_TYPE_STRING, "F32LE",
      NULL);
  g_object_set (capsfilter, "caps", caps, NULL);

  equalizer = gst_element_factory_make ("equalizer-nbands", "equalizer");
  g_object_set (G_OBJECT (equalizer), "num-bands", num_bands, NULL);

  spectrum = gst_element_factory_make ("spectrum", "spectrum");
  g_object_set (G_OBJECT (spectrum), "bands", spect_bands, "threshold", -80,
      "post-messages", TRUE, "interval", 500 * GST_MSECOND, NULL);

  sinkconvert = gst_element_factory_make ("audioconvert", "sinkconvert");

  sink = gst_element_factory_make ("autoaudiosink", "sink");

  gst_bin_add_many (GST_BIN (bin), decodebin, decconvert, capsfilter, equalizer,
      spectrum, sinkconvert, sink, NULL);
  if (!gst_element_link_many (decconvert, capsfilter, equalizer, spectrum,
          sinkconvert, sink, NULL)) {
    fprintf (stderr, "can't link elements\n");
    exit (1);
  }

  /* Handle dynamic pads */
  g_signal_connect (G_OBJECT (decodebin), "pad-added",
      G_CALLBACK (dynamic_link), gst_element_get_static_pad (decconvert,
          "sink"));

  bus = gst_element_get_bus (bin);
  gst_bus_add_watch (bus, message_handler, NULL);
  gst_object_unref (bus);

  appwindow = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  gtk_window_set_title (GTK_WINDOW (appwindow), "Equalizer Demo");
  g_signal_connect (G_OBJECT (appwindow), "destroy",
      G_CALLBACK (on_window_destroy), NULL);
  vbox = gtk_box_new (GTK_ORIENTATION_VERTICAL, 6);

  drawingarea = gtk_drawing_area_new ();
  gtk_widget_set_size_request (drawingarea, spect_bands, spect_height);
  g_signal_connect (G_OBJECT (drawingarea), "configure-event",
      G_CALLBACK (on_configure_event), (gpointer) spectrum);
  gtk_box_pack_start (GTK_BOX (vbox), drawingarea, TRUE, TRUE, 0);

  hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 20);

  for (i = 0; i < num_bands; i++) {
    GObject *band;
    gdouble freq;
    gdouble bw;
    gdouble gain;
    gchar *label;
    GtkWidget *frame, *scales_hbox;

    band = gst_child_proxy_get_child_by_index (GST_CHILD_PROXY (equalizer), i);
    g_assert (band != NULL);
    g_object_get (band, "freq", &freq, NULL);
    g_object_get (band, "bandwidth", &bw, NULL);
    g_object_get (band, "gain", &gain, NULL);

    label = g_strdup_printf ("%d Hz", (int) (freq + 0.5));
    frame = gtk_frame_new (label);
    g_free (label);

    scales_hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 6);

    /* Create gain scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        -24.0, 12.0, 0.5);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), gain);
    gtk_widget_set_size_request (scale, 35, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_gain_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, FALSE, FALSE, 0);

    /* Create bandwidth scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        0.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), bw);
    gtk_widget_set_size_request (scale, 45, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_bandwidth_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, TRUE, TRUE, 0);

    /* Create frequency scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        20.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), freq);
    gtk_widget_set_size_request (scale, 45, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_freq_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, TRUE, TRUE, 0);

    gtk_container_add (GTK_CONTAINER (frame), scales_hbox);

    gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
  }

  gtk_box_pack_start (GTK_BOX (vbox), hbox, TRUE, TRUE, 0);

  gtk_container_add (GTK_CONTAINER (appwindow), vbox);
  gtk_widget_show_all (appwindow);

  gst_element_set_state (bin, GST_STATE_PLAYING);
  gtk_main ();
  gst_element_set_state (bin, GST_STATE_NULL);

  gst_object_unref (bin);

  return 0;
}
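
The demo wires three "value-changed" handlers per band; their definitions live elsewhere in the example, but a minimal sketch of the gain handler (hypothetical, shown only to make the wiring concrete) could look like:

static void
on_gain_changed (GtkRange * range, gpointer user_data)
{
  GstObject *band = GST_OBJECT (user_data);

  /* forward the slider position straight to the band's "gain" property */
  g_object_set (band, "gain", gtk_range_get_value (range), NULL);
}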
Example #24
static GstFlowReturn
gst_real_audio_demux_parse_header (GstRealAudioDemux * demux)
{
  const guint8 *data;
  gchar *codec_name = NULL;
  GstCaps *caps = NULL;
  GstEvent *event;
  gchar *stream_id;
  guint avail;

  g_assert (demux->ra_version == 4 || demux->ra_version == 3);

  avail = gst_adapter_available (demux->adapter);
  if (avail < 16)
    return GST_FLOW_OK;

  if (!gst_real_audio_demux_get_data_offset_from_header (demux))
    return GST_FLOW_ERROR;      /* shouldn't happen */

  GST_DEBUG_OBJECT (demux, "data_offset  = %u", demux->data_offset);

  if (avail + 6 < demux->data_offset) {
    GST_DEBUG_OBJECT (demux, "Need %u bytes, but only %u available now",
        demux->data_offset - 6, avail);
    return GST_FLOW_OK;
  }

  data = gst_adapter_map (demux->adapter, demux->data_offset - 6);
  g_assert (data);

  switch (demux->ra_version) {
    case 3:
      demux->fourcc = GST_RM_AUD_14_4;
      demux->packet_size = 20;
      demux->sample_rate = 8000;
      demux->channels = 1;
      demux->sample_width = 16;
      demux->flavour = 1;
      demux->leaf_size = 0;
      demux->height = 0;
      break;
    case 4:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);
      demux->packet_size = GST_READ_UINT32_BE (data + 18);
      demux->sample_rate = GST_READ_UINT16_BE (data + 42);
      demux->sample_width = GST_READ_UINT16_BE (data + 46);
      demux->channels = GST_READ_UINT16_BE (data + 48);
      demux->fourcc = GST_READ_UINT32_LE (data + 56);
      demux->pending_tags = gst_rm_utils_read_tags (data + 63,
          demux->data_offset - 63, gst_rm_utils_read_string8);
      if (demux->pending_tags)
        gst_tag_list_set_scope (demux->pending_tags, GST_TAG_SCOPE_GLOBAL);
      break;
    default:
      g_assert_not_reached ();
#if 0
    case 5:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);

      demux->sample_rate = GST_READ_UINT16_BE (data + 48);
      demux->sample_width = GST_READ_UINT16_BE (data + 52);
      demux->n_channels = GST_READ_UINT16_BE (data + 54);
      demux->fourcc = RMDEMUX_FOURCC_GET (data + 60);
      break;
#endif
  }

  GST_INFO_OBJECT (demux, "packet_size  = %u", demux->packet_size);
  GST_INFO_OBJECT (demux, "sample_rate  = %u", demux->sample_rate);
  GST_INFO_OBJECT (demux, "sample_width = %u", demux->sample_width);
  GST_INFO_OBJECT (demux, "channels     = %u", demux->channels);
  GST_INFO_OBJECT (demux, "fourcc       = '%" GST_FOURCC_FORMAT "' (%08X)",
      GST_FOURCC_ARGS (demux->fourcc), demux->fourcc);

  switch (demux->fourcc) {
    case GST_RM_AUD_14_4:
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 1, NULL);
      demux->byterate_num = 1000;
      demux->byterate_denom = 1;
      break;

    case GST_RM_AUD_28_8:
      /* FIXME: needs descrambling */
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 2, NULL);
      break;

    case GST_RM_AUD_DNET:
      caps = gst_caps_new_simple ("audio/x-ac3", "rate", G_TYPE_INT,
          demux->sample_rate, NULL);
      if (demux->packet_size == 0 || demux->sample_rate == 0)
        goto broken_file;
      demux->byterate_num = demux->packet_size * demux->sample_rate;
      demux->byterate_denom = 1536;
      break;

      /* Sipro/ACELP.NET Voice Codec (MIME unknown) */
    case GST_RM_AUD_SIPR:
      caps = gst_caps_new_empty_simple ("audio/x-sipro");
      break;

    default:
      GST_WARNING_OBJECT (demux, "unknown fourcc %08X", demux->fourcc);
      break;
  }

  if (caps == NULL)
    goto unknown_fourcc;

  gst_caps_set_simple (caps,
      "flavor", G_TYPE_INT, demux->flavour,
      "rate", G_TYPE_INT, demux->sample_rate,
      "channels", G_TYPE_INT, demux->channels,
      "width", G_TYPE_INT, demux->sample_width,
      "leaf_size", G_TYPE_INT, demux->leaf_size,
      "packet_size", G_TYPE_INT, demux->packet_size,
      "height", G_TYPE_INT, demux->height, NULL);

  GST_INFO_OBJECT (demux, "Adding source pad, caps %" GST_PTR_FORMAT, caps);
  demux->srcpad = gst_pad_new_from_static_template (&src_template, "src");
  gst_pad_set_event_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_event));
  gst_pad_set_query_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_query));
  gst_pad_set_active (demux->srcpad, TRUE);
  gst_pad_use_fixed_caps (demux->srcpad);

  stream_id =
      gst_pad_create_stream_id (demux->srcpad, GST_ELEMENT_CAST (demux), NULL);

  event = gst_pad_get_sticky_event (demux->sinkpad, GST_EVENT_STREAM_START, 0);
  if (event) {
    if (gst_event_parse_group_id (event, &demux->group_id))
      demux->have_group_id = TRUE;
    else
      demux->have_group_id = FALSE;
    gst_event_unref (event);
  } else if (!demux->have_group_id) {
    demux->have_group_id = TRUE;
    demux->group_id = gst_util_group_id_next ();
  }

  event = gst_event_new_stream_start (stream_id);
  if (demux->have_group_id)
    gst_event_set_group_id (event, demux->group_id);

  gst_pad_push_event (demux->srcpad, event);
  g_free (stream_id);

  gst_pad_set_caps (demux->srcpad, caps);
  codec_name = gst_pb_utils_get_codec_description (caps);
  gst_caps_unref (caps);

  gst_element_add_pad (GST_ELEMENT (demux), demux->srcpad);

  if (demux->byterate_num > 0 && demux->byterate_denom > 0) {
    GstFormat bformat = GST_FORMAT_BYTES;
    gint64 size_bytes = 0;

    GST_INFO_OBJECT (demux, "byte rate = %u/%u = %u bytes/sec",
        demux->byterate_num, demux->byterate_denom,
        demux->byterate_num / demux->byterate_denom);

    if (gst_pad_peer_query_duration (demux->sinkpad, bformat, &size_bytes)) {
      demux->duration =
          gst_real_demux_get_timestamp_from_offset (demux, size_bytes);
      demux->upstream_size = size_bytes;
      GST_INFO_OBJECT (demux, "upstream_size = %" G_GUINT64_FORMAT,
          demux->upstream_size);
      GST_INFO_OBJECT (demux, "duration      = %" GST_TIME_FORMAT,
          GST_TIME_ARGS (demux->duration));
    }
  }

  if (codec_name) {
    if (demux->pending_tags == NULL) {
      demux->pending_tags = gst_tag_list_new_empty ();
      gst_tag_list_set_scope (demux->pending_tags, GST_TAG_SCOPE_GLOBAL);
    }

    gst_tag_list_add (demux->pending_tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_AUDIO_CODEC, codec_name, NULL);
    g_free (codec_name);
  }

  gst_adapter_unmap (demux->adapter);
  gst_adapter_flush (demux->adapter, demux->data_offset - 6);

  demux->state = REAL_AUDIO_DEMUX_STATE_DATA;
  demux->need_newsegment = TRUE;

  return GST_FLOW_OK;

/* ERRORS */
unknown_fourcc:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Unknown fourcc '0x%" G_GINT32_MODIFIER "x'", demux->fourcc));
    return GST_FLOW_ERROR;
  }
broken_file:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Broken file - invalid sample_rate or other header value"));
    return GST_FLOW_ERROR;
  }

}
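
gst_real_demux_get_timestamp_from_offset() is not shown here; under the constant byte-rate model the code sets up (byterate_num / byterate_denom bytes per second), a plausible sketch is:

/* sketch only: map a byte offset to a timestamp, assuming a constant
 * byte rate of byterate_num / byterate_denom bytes per second */
static GstClockTime
gst_real_demux_get_timestamp_from_offset (GstRealAudioDemux * demux,
    guint64 offset)
{
  if (demux->byterate_num == 0 || demux->byterate_denom == 0)
    return GST_CLOCK_TIME_NONE;

  return gst_util_uint64_scale (offset,
      GST_SECOND * demux->byterate_denom, demux->byterate_num);
}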
Example #25
static gboolean
gst_rtp_bv_depay_setcaps (GstRTPBaseDepayload * depayload, GstCaps * caps)
{
  GstRTPBVDepay *rtpbvdepay = GST_RTP_BV_DEPAY (depayload);
  GstCaps *srccaps;
  GstStructure *structure;
  const gchar *mode_str = NULL;
  gint mode, clock_rate, expected_rate;
  gboolean ret;

  structure = gst_caps_get_structure (caps, 0);

  mode_str = gst_structure_get_string (structure, "encoding-name");
  if (!mode_str)
    goto no_mode;

  if (!strcmp (mode_str, "BV16")) {
    mode = 16;
    expected_rate = 8000;
  } else if (!strcmp (mode_str, "BV32")) {
    mode = 32;
    expected_rate = 16000;
  } else
    goto invalid_mode;

  if (!gst_structure_get_int (structure, "clock-rate", &clock_rate))
    clock_rate = expected_rate;
  else if (clock_rate != expected_rate)
    goto wrong_rate;

  depayload->clock_rate = clock_rate;
  rtpbvdepay->mode = mode;

  srccaps = gst_caps_new_simple ("audio/x-bv",
      "mode", G_TYPE_INT, rtpbvdepay->mode, NULL);
  ret = gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), srccaps);

  GST_DEBUG ("set caps on source: %" GST_PTR_FORMAT " (ret=%d)", srccaps, ret);
  gst_caps_unref (srccaps);

  return ret;

  /* ERRORS */
no_mode:
  {
    GST_ERROR_OBJECT (rtpbvdepay, "did not receive an encoding-name");
    return FALSE;
  }
invalid_mode:
  {
    GST_ERROR_OBJECT (rtpbvdepay,
        "invalid encoding-name, expected BV16 or BV32, got %s", mode_str);
    return FALSE;
  }
wrong_rate:
  {
    GST_ERROR_OBJECT (rtpbvdepay, "invalid clock-rate, expected %d, got %d",
        expected_rate, clock_rate);
    return FALSE;
  }
}
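
For reference, the sink caps this setcaps function accepts look like the following (a sketch of the BV16 case, e.g. as produced by an upstream RTP session):

  caps = gst_caps_new_simple ("application/x-rtp",
      "media", G_TYPE_STRING, "audio",
      "encoding-name", G_TYPE_STRING, "BV16",
      "clock-rate", G_TYPE_INT, 8000, NULL);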
static gboolean
gst_openjpeg_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstOpenJPEGEnc *self = GST_OPENJPEG_ENC (encoder);
  GstCaps *allowed_caps, *caps;
  GstStructure *s;
  const gchar *colorspace;
  gint ncomps;

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state)
    gst_video_codec_state_unref (self->input_state);
  self->input_state = gst_video_codec_state_ref (state);

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  allowed_caps = gst_caps_truncate (allowed_caps);
  s = gst_caps_get_structure (allowed_caps, 0);
  if (gst_structure_has_name (s, "image/jp2")) {
    self->codec_format = OPJ_CODEC_JP2;
    self->is_jp2c = FALSE;
  } else if (gst_structure_has_name (s, "image/x-j2c")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = TRUE;
  } else if (gst_structure_has_name (s, "image/x-jpc")) {
    self->codec_format = OPJ_CODEC_J2K;
    self->is_jp2c = FALSE;
  } else {
    g_return_val_if_reached (FALSE);
  }

  switch (state->info.finfo->format) {
    case GST_VIDEO_FORMAT_ARGB64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_ARGB:
      self->fill_image = fill_image_packed8_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_xRGB:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV64:
      self->fill_image = fill_image_packed16_4;
      ncomps = 4;
      break;
    case GST_VIDEO_FORMAT_Y444_10LE:
    case GST_VIDEO_FORMAT_Y444_10BE:
    case GST_VIDEO_FORMAT_I422_10LE:
    case GST_VIDEO_FORMAT_I422_10BE:
    case GST_VIDEO_FORMAT_I420_10LE:
    case GST_VIDEO_FORMAT_I420_10BE:
      self->fill_image = fill_image_planar16_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      self->fill_image = fill_image_packed8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_YUV9:
      self->fill_image = fill_image_planar8_3;
      ncomps = 3;
      break;
    case GST_VIDEO_FORMAT_GRAY8:
      self->fill_image = fill_image_planar8_1;
      ncomps = 1;
      break;
    case GST_VIDEO_FORMAT_GRAY16_LE:
    case GST_VIDEO_FORMAT_GRAY16_BE:
      self->fill_image = fill_image_planar16_1;
      ncomps = 1;
      break;
    default:
      g_assert_not_reached ();
  }

  if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_YUV))
    colorspace = "sYUV";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_RGB))
    colorspace = "sRGB";
  else if ((state->info.finfo->flags & GST_VIDEO_FORMAT_FLAG_GRAY))
    colorspace = "GRAY";
  else
    g_return_val_if_reached (FALSE);

  caps = gst_caps_new_simple (gst_structure_get_name (s),
      "colorspace", G_TYPE_STRING, colorspace,
      "num-components", G_TYPE_INT, ncomps, NULL);
  gst_caps_unref (allowed_caps);

  if (self->output_state)
    gst_video_codec_state_unref (self->output_state);
  self->output_state =
      gst_video_encoder_set_output_state (encoder, caps, state);

  gst_video_encoder_negotiate (GST_VIDEO_ENCODER (encoder));

  return TRUE;
}
bool ofGstVideoPlayer::loadMovie(string name){
	close();
	if( name.find( "file://",0 ) != string::npos){
		bIsStream		= false;
	}else if( name.find( "://",0 ) == string::npos){
		name 			= "file:///"+ofToDataPath(name,true);
		bIsStream		= false;
	}else{
		bIsStream		= true;
	}
	ofLog(OF_LOG_VERBOSE,"loading "+name);

	ofGstUtils::startGstMainLoop();

#if GST_VERSION_MAJOR==0
	GstElement * gstPipeline = gst_element_factory_make("playbin2","player");
#else
	GstElement * gstPipeline = gst_element_factory_make("playbin","player");
#endif
	g_object_set(G_OBJECT(gstPipeline), "uri", name.c_str(), (void*)NULL);

	// create the oF appsink for video rgb (synced to the clock below)
	GstElement * gstSink = gst_element_factory_make("appsink", "app_sink");

	gst_base_sink_set_sync(GST_BASE_SINK(gstSink), true);
	gst_app_sink_set_max_buffers(GST_APP_SINK(gstSink), 8);
	gst_app_sink_set_drop (GST_APP_SINK(gstSink),true);
	gst_base_sink_set_max_lateness  (GST_BASE_SINK(gstSink), -1);

#if GST_VERSION_MAJOR==0
	int bpp;
	string mime;
	switch(internalPixelFormat){
	case OF_PIXELS_MONO:
		mime = "video/x-raw-gray";
		bpp = 8;
		break;
	case OF_PIXELS_RGB:
		mime = "video/x-raw-rgb";
		bpp = 24;
		break;
	case OF_PIXELS_RGBA:
	case OF_PIXELS_BGRA:
		mime = "video/x-raw-rgb";
		bpp = 32;
		break;
	default:
		mime = "video/x-raw-rgb";
		bpp=24;
		break;
	}

	GstCaps *caps = gst_caps_new_simple(mime.c_str(),
										"bpp", G_TYPE_INT, bpp,
										"depth", G_TYPE_INT, 24,
										"endianness",G_TYPE_INT,4321,
										"red_mask",G_TYPE_INT,0xff0000,
										"green_mask",G_TYPE_INT,0x00ff00,
										"blue_mask",G_TYPE_INT,0x0000ff,
										"alpha_mask",G_TYPE_INT,0x000000ff,
										NULL);
#else
	int bpp;
	string mime="video/x-raw";
	string format;
	switch(internalPixelFormat){
	case OF_PIXELS_MONO:
		format = "GRAY8";
		bpp = 8;
		break;
	case OF_PIXELS_RGB:
		format = "RGB";
		bpp = 24;
		break;
	case OF_PIXELS_RGBA:
		format = "RGBA";
		bpp = 32;
		break;
	case OF_PIXELS_BGRA:
		format = "BGRA";
		bpp = 32;
		break;
	default:
		format = "RGB";
		bpp=24;
		break;
	}

	GstCaps *caps = gst_caps_new_simple(mime.c_str(),
										"format", G_TYPE_STRING, format.c_str(),
										/*"bpp", G_TYPE_INT, bpp,
										"depth", G_TYPE_INT, 24,
										"endianness",G_TYPE_INT,4321,
										"red_mask",G_TYPE_INT,0xff0000,
										"green_mask",G_TYPE_INT,0x00ff00,
										"blue_mask",G_TYPE_INT,0x0000ff,
										"alpha_mask",G_TYPE_INT,0x000000ff,*/
										NULL);
#endif


	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	if(threadAppSink){
		GstElement * appQueue = gst_element_factory_make("queue","appsink_queue");
		g_object_set(G_OBJECT(appQueue), "leaky", 0, "silent", 1, (void*)NULL);
		GstElement* appBin = gst_bin_new("app_bin");
		gst_bin_add(GST_BIN(appBin), appQueue);
		GstPad* appQueuePad = gst_element_get_static_pad(appQueue, "sink");
		GstPad* ghostPad = gst_ghost_pad_new("app_bin_sink", appQueuePad);
		gst_object_unref(appQueuePad);
		gst_element_add_pad(appBin, ghostPad);

		gst_bin_add_many(GST_BIN(appBin), gstSink, NULL);
		gst_element_link_many(appQueue, gstSink, NULL);

		g_object_set (G_OBJECT(gstPipeline),"video-sink",appBin,(void*)NULL);
	}else{
		g_object_set (G_OBJECT(gstPipeline),"video-sink",gstSink,(void*)NULL);
	}

#ifdef TARGET_WIN32
	GstElement *audioSink = gst_element_factory_make("directsoundsink", NULL);
	g_object_set (G_OBJECT(gstPipeline),"audio-sink",audioSink,(void*)NULL);

#endif


	videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);
	if(!bIsStream) return allocate(bpp);
	else return true;
}
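
A minimal usage sketch, assuming an openFrameworks app that drives this player directly; paths without a scheme are resolved through ofToDataPath(), as above:

	// sketch only: load a movie from the app's data/ folder
	ofGstVideoPlayer player;
	player.setPixelFormat(OF_PIXELS_RGB);	// selects the RGB caps branch above
	player.loadMovie("movies/clip.mp4");	// becomes file:///.../data/movies/clip.mp4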
Example #28
bool ofGstVideoPlayer::createPipeline(string name){
#ifndef OF_USE_GST_GL
#if GST_VERSION_MAJOR==0
	GstCaps *caps;
	int bpp;
	switch(internalPixelFormat){
	case OF_PIXELS_GRAY:
		bpp = 8;
		caps = gst_caps_new_simple("video/x-raw-gray",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 8,
			NULL);
		break;
	case OF_PIXELS_RGB:
		bpp = 24;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 24,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff0000,
			"green_mask",G_TYPE_INT,0x00ff00,
			"blue_mask",G_TYPE_INT,0x0000ff,
			NULL);
		break;
	case OF_PIXELS_RGBA:
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 32,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff000000,
			"green_mask",G_TYPE_INT,0x00ff0000,
			"blue_mask",G_TYPE_INT,0x0000ff00,
			"alpha_mask",G_TYPE_INT,0x000000ff,
			NULL);
		break;
	case OF_PIXELS_BGRA:
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 32,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0x0000ff00,
			"green_mask",G_TYPE_INT,0x00ff0000,
			"blue_mask",G_TYPE_INT,0xff000000,
			"alpha_mask",G_TYPE_INT,0x000000ff,
			NULL);
		break;
	default:
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 24,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff0000,
			"green_mask",G_TYPE_INT,0x00ff00,
			"blue_mask",G_TYPE_INT,0x0000ff,
			NULL);
		break;
	}
#else
	string mime="video/x-raw";

	GstCaps *caps;
	if(internalPixelFormat==OF_PIXELS_NATIVE){
		caps = gst_caps_from_string((mime + ",format={RGBA,BGRA,RGB,BGR,RGB16,GRAY8,YV12,I420,NV12,NV21,YUY2}").c_str());
	}else{
		string format = ofGstVideoUtils::getGstFormatName(internalPixelFormat);
		caps = gst_caps_new_simple(mime.c_str(),
											"format", G_TYPE_STRING, format.c_str(),
											NULL);
	}
#endif

	#if GST_VERSION_MAJOR==0
		GstElement * gstPipeline = gst_element_factory_make("playbin2","player");
	#else
		GstElement * gstPipeline = gst_element_factory_make("playbin","player");
	#endif
	g_object_ref_sink(gstPipeline);
	g_object_set(G_OBJECT(gstPipeline), "uri", name.c_str(), (void*)NULL);

	// create the oF appsink for video rgb without sync to clock
	GstElement * gstSink = gst_element_factory_make("appsink", "app_sink");
	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	if(threadAppSink){
		GstElement * appQueue = gst_element_factory_make("queue","appsink_queue");
		g_object_set(G_OBJECT(appQueue), "leaky", 0, "silent", 1, (void*)NULL);
		GstElement* appBin = gst_bin_new("app_bin");
		gst_bin_add(GST_BIN(appBin), appQueue);
		GstPad* appQueuePad = gst_element_get_static_pad(appQueue, "sink");
		GstPad* ghostPad = gst_ghost_pad_new("app_bin_sink", appQueuePad);
		gst_object_unref(appQueuePad);
		gst_element_add_pad(appBin, ghostPad);

		gst_bin_add(GST_BIN(appBin), gstSink);
		gst_element_link(appQueue, gstSink);

		g_object_set (G_OBJECT(gstPipeline),"video-sink",appBin,(void*)NULL);
	}else{
		g_object_set (G_OBJECT(gstPipeline),"video-sink",gstSink,(void*)NULL);
	}

	#ifdef TARGET_WIN32
		GstElement *audioSink = gst_element_factory_make("directsoundsink", NULL);
		g_object_set (G_OBJECT(gstPipeline),"audio-sink",audioSink,(void*)NULL);
	#endif

	return videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);

#else
	/*auto gstPipeline = gst_parse_launch(("uridecodebin uri=" + name + " ! glcolorscale name=gl_filter ! appsink name=app_sink").c_str(),NULL);
	auto gstSink = gst_bin_get_by_name(GST_BIN(gstPipeline),"app_sink");
	auto glfilter = gst_bin_get_by_name(GST_BIN(gstPipeline),"gl_filter");
	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	glXMakeCurrent (ofGetX11Display(), None, 0);
	glDisplay = (GstGLDisplay *)gst_gl_display_x11_new_with_display(ofGetX11Display());
	glContext = gst_gl_context_new_wrapped (glDisplay, (guintptr) ofGetGLXContext(),
	    		  GST_GL_PLATFORM_GLX, GST_GL_API_OPENGL);

	g_object_set (G_OBJECT (glfilter), "other-context", glContext, NULL);

	// FIXME: this seems to be the way to add the context in 1.4.5
	//
	// GstBus * bus = gst_pipeline_get_bus (GST_PIPELINE(gstPipeline));
	// gst_bus_enable_sync_message_emission (bus);
	// g_signal_connect (bus, "sync-message", G_CALLBACK (sync_bus_call), this);
	// gst_object_unref(bus);

	auto ret = videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);
	glXMakeCurrent (ofGetX11Display(), ofGetX11Window(), ofGetGLXContext());
	return ret;*/

	return videoUtils.setPipeline("uridecodebin uri=" + name,internalPixelFormat,bIsStream,-1,-1);
	//return videoUtils.setPipeline("filesrc location=" + name + " ! qtdemux ",internalPixelFormat,bIsStream,-1,-1);
#endif
}
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink.  The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  gst_segment_init(&last_decodebin_segment_, GST_FORMAT_TIME);

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (g_object_class_find_property(G_OBJECT_GET_CLASS(audiosink_), "device") &&
      !device_.toString().isEmpty()) {
    switch (device_.type()) {
      case QVariant::Int:
        g_object_set(G_OBJECT(audiosink_), "device", device_.toInt(), nullptr);
        break;
      case QVariant::String:
        g_object_set(G_OBJECT(audiosink_), "device",
                     device_.toString().toUtf8().constData(), nullptr);
        break;

#ifdef Q_OS_WIN32
      case QVariant::ByteArray: {
        GUID guid = QUuid(device_.toByteArray());
        g_object_set(G_OBJECT(audiosink_), "device", &guid, nullptr);
        break;
      }
#endif  // Q_OS_WIN32

      default:
        qLog(Warning) << "Unknown device type" << device_;
        break;
    }
  }

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);

  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);

  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalizer and pre-volume samples,
  // so that our visualizations are not affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM,
                    &EventHandoffCallback, this, NULL);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Setting the equalizer bands:
  //
  // GStreamer's GstIirEqualizerNBands sets up shelve filters for the first and
  // last bands as corner cases. That was causing the "inverted slider" bug.
  // As a workaround, we create two dummy bands at both ends of the spectrum.
  // This causes the actual first and last adjustable bands to be
  // implemented using band-pass filters.

  g_object_set(G_OBJECT(equalizer_), "num-bands", kEqBandCount + 2, nullptr);

  // Dummy first band (bandwidth 0, cutting below 20Hz):
  GstObject* first_band = GST_OBJECT(
      gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), 0));
  g_object_set(G_OBJECT(first_band), "freq", 20.0, "bandwidth", 0.0, "gain",
               0.0, nullptr);
  g_object_unref(G_OBJECT(first_band));

  // Dummy last band (bandwidth 0, cutting over 20KHz):
  GstObject* last_band = GST_OBJECT(gst_child_proxy_get_child_by_index(
      GST_CHILD_PROXY(equalizer_), kEqBandCount + 1));
  g_object_set(G_OBJECT(last_band), "freq", 20000.0, "bandwidth", 0.0, "gain",
               0.0, nullptr);
  g_object_unref(G_OBJECT(last_band));

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    const int index_in_eq = i + 1;
    GstObject* band = GST_OBJECT(gst_child_proxy_get_child_by_index(
        GST_CHILD_PROXY(equalizer_), index_in_eq));

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", buffer_min_fill_, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link_many(queue_, audioconvert_, convert_sink, nullptr);
  gst_element_link(probe_converter, probe_sink);

  // Link the outputs of tee to the queues on each path.
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link the analyzer output of the tee and force 16 bit caps
  GstCaps* caps16 = gst_caps_new_simple("audio/x-raw", "format", G_TYPE_STRING,
                                        "S16LE", NULL);
  gst_element_link_filtered(probe_queue, probe_converter, caps16);
  gst_caps_unref(caps16);

  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        nullptr);

  // We only limit the media type to raw audio.
  // Let the audio output of the tee autonegotiate the bit depth and format.
  GstCaps* caps = gst_caps_new_empty_simple("audio/x-raw");

  // Add caps for fixed sample rate and mono, but only if requested
  if (sample_rate_ != GstEngine::kAutoSampleRate && sample_rate_ > 0) {
    gst_caps_set_simple(caps, "rate", G_TYPE_INT, sample_rate_, nullptr);
  }

  if (mono_playback_) {
    gst_caps_set_simple(caps, "channels", G_TYPE_INT, 1, nullptr);
  }

  gst_element_link_filtered(convert, audiosink_, caps);
  gst_caps_unref(caps);

  // Add probes and handlers.
  gst_pad_add_probe(gst_element_get_static_pad(probe_converter, "src"),
                    GST_PAD_PROBE_TYPE_BUFFER, HandoffCallback, this, nullptr);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this, nullptr);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
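
The net effect of the dummy-band workaround above is easiest to see as the resulting band layout (illustrative, with kEqBandCount == 10):

  // index 0                : dummy band, freq 20 Hz,  bandwidth 0 (inert shelf)
  // index 1..kEqBandCount  : the user-adjustable band-pass bands
  // index kEqBandCount + 1 : dummy band, freq 20 kHz, bandwidth 0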
static GstFlowReturn
gst_dirac_parse_handle_frame (GstBaseParse * parse,
    GstBaseParseFrame * frame, gint * skipsize)
{
  int off;
  guint32 next_header;
  GstMapInfo map;
  guint8 *data;
  gsize size;
  gboolean have_picture = FALSE;
  int offset;
  guint framesize = 0;

  gst_buffer_map (frame->buffer, &map, GST_MAP_READ);
  data = map.data;
  size = map.size;

  if (G_UNLIKELY (size < 13)) {
    *skipsize = 1;
    goto out;
  }

  GST_DEBUG ("%" G_GSIZE_FORMAT ": %02x %02x %02x %02x", size, data[0], data[1],
      data[2], data[3]);

  if (GST_READ_UINT32_BE (data) != 0x42424344) {
    GstByteReader reader;

    gst_byte_reader_init (&reader, data, size);
    off = gst_byte_reader_masked_scan_uint32 (&reader, 0xffffffff,
        0x42424344, 0, size);

    if (off < 0) {
      *skipsize = size - 3;
      goto out;
    }

    GST_LOG_OBJECT (parse, "possible sync at buffer offset %d", off);

    GST_DEBUG ("skipping %d", off);
    *skipsize = off;
    goto out;
  }

  /* have sync, parse chunks */

  offset = 0;
  while (!have_picture) {
    GST_DEBUG ("offset %d:", offset);

    if (offset + 13 >= size) {
      framesize = offset + 13;
      goto out;
    }

    GST_DEBUG ("chunk type %02x", data[offset + 4]);

    if (GST_READ_UINT32_BE (data + offset) != 0x42424344) {
      GST_DEBUG ("bad header");
      *skipsize = 3;
      goto out;
    }

    next_header = GST_READ_UINT32_BE (data + offset + 5);
    GST_DEBUG ("next_header %d", next_header);
    if (next_header == 0)
      next_header = 13;

    if (SCHRO_PARSE_CODE_IS_PICTURE (data[offset + 4])) {
      have_picture = TRUE;
    }

    offset += next_header;
    if (offset >= size) {
      framesize = offset;
      goto out;
    }
  }

  /* keep the buffer mapped: data is still dereferenced below */
  framesize = offset;
  GST_DEBUG ("framesize %d", framesize);

  g_assert (framesize <= size);

  if (data[4] == SCHRO_PARSE_CODE_SEQUENCE_HEADER) {
    GstCaps *caps;
    GstDiracParse *diracparse = GST_DIRAC_PARSE (parse);
    DiracSequenceHeader sequence_header;
    int ret;

    ret = dirac_sequence_header_parse (&sequence_header, data + 13, size - 13);
    if (ret) {
      memcpy (&diracparse->sequence_header, &sequence_header,
          sizeof (sequence_header));
      caps = gst_caps_new_simple ("video/x-dirac",
          "width", G_TYPE_INT, sequence_header.width,
          "height", G_TYPE_INT, sequence_header.height,
          "framerate", GST_TYPE_FRACTION,
          sequence_header.frame_rate_numerator,
          sequence_header.frame_rate_denominator,
          "pixel-aspect-ratio", GST_TYPE_FRACTION,
          sequence_header.aspect_ratio_numerator,
          sequence_header.aspect_ratio_denominator,
          "interlaced", G_TYPE_BOOLEAN, sequence_header.interlaced,
          "profile", G_TYPE_INT, sequence_header.profile,
          "level", G_TYPE_INT, sequence_header.level, NULL);
      gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (parse), caps);
      gst_caps_unref (caps);

      gst_base_parse_set_frame_rate (parse,
          sequence_header.frame_rate_numerator,
          sequence_header.frame_rate_denominator, 0, 0);
    }
  }

  gst_buffer_unmap (frame->buffer, &map);

  gst_base_parse_set_min_frame_size (parse, 13);

  return gst_base_parse_finish_frame (parse, frame, framesize);

out:
  gst_buffer_unmap (frame->buffer, &map);
  if (framesize)
    gst_base_parse_set_min_frame_size (parse, framesize);
  return GST_FLOW_OK;
}
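
A closing note on the magic number: 0x42424344 is simply the ASCII bytes 'B','B','C','D' read big-endian, the prefix that starts every Dirac parse-info header, which is why the scan above matches on the full 32 bits:

  /* sketch: the sync word is the ASCII string "BBCD" read big-endian */
  g_assert (GST_READ_UINT32_BE ((const guint8 *) "BBCD") == 0x42424344);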