// Creates a blank/empty frame (interpolate() must later be called).
FFTFrame::FFTFrame()
    : m_FFTSize(0)
    , m_log2FFTSize(0)
    , m_complexData(0)
{
    int fftLength = gst_fft_next_fast_length(m_FFTSize);
    m_fft = gst_fft_f32_new(fftLength, FALSE);
    m_inverseFft = gst_fft_f32_new(fftLength, TRUE);
}
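Both contexts are created even for this empty frame: gst_fft_next_fast_length() rounds the requested length up to the next size the KissFFT-based backend can transform efficiently, and the boolean passed to gst_fft_f32_new() selects a forward (FALSE) or inverse (TRUE) transform context.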
// Normal constructor: allocates for a given fftSize.
FFTFrame::FFTFrame(unsigned fftSize)
    : m_FFTSize(fftSize)
    , m_log2FFTSize(static_cast<unsigned>(log2(fftSize)))
    , m_realData(unpackedFFTDataSize(m_FFTSize))
    , m_imagData(unpackedFFTDataSize(m_FFTSize))
{
    m_complexData = WTF::fastNewArray<GstFFTF32Complex>(unpackedFFTDataSize(m_FFTSize));

    int fftLength = gst_fft_next_fast_length(m_FFTSize);
    m_fft = gst_fft_f32_new(fftLength, FALSE);
    m_inverseFft = gst_fft_f32_new(fftLength, TRUE);
}
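The frame keeps its spectrum "unpacked" in two parallel float arrays (m_realData, m_imagData), while m_complexData is a scratch buffer in the interleaved GstFFTF32Complex layout that gst_fft_f32_fft() and gst_fft_f32_inverse_fft() operate on; unpackedFFTDataSize() is presumably fftSize / 2 + 1, the number of complex bins a real transform of length fftSize produces.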
// Copy constructor.
FFTFrame::FFTFrame(const FFTFrame& frame)
    : m_FFTSize(frame.m_FFTSize)
    , m_log2FFTSize(frame.m_log2FFTSize)
    , m_realData(unpackedFFTDataSize(frame.m_FFTSize))
    , m_imagData(unpackedFFTDataSize(frame.m_FFTSize))
{
    m_complexData = WTF::fastNewArray<GstFFTF32Complex>(unpackedFFTDataSize(m_FFTSize));

    int fftLength = gst_fft_next_fast_length(m_FFTSize);
    m_fft = gst_fft_f32_new(fftLength, FALSE);
    m_inverseFft = gst_fft_f32_new(fftLength, TRUE);

    // Copy/setup frame data.
    memcpy(realData(), frame.realData(), sizeof(float) * unpackedFFTDataSize(m_FFTSize));
    memcpy(imagData(), frame.imagData(), sizeof(float) * unpackedFFTDataSize(m_FFTSize));
}
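The matching teardown is not shown on this page. A minimal sketch, assuming the destructor simply mirrors the allocations above (gst_fft_f32_free() for the two contexts, WTF::fastDeleteArray() for the interleaved buffer):

FFTFrame::~FFTFrame()
{
    if (m_fft)
        gst_fft_f32_free(m_fft);
    if (m_inverseFft)
        gst_fft_f32_free(m_inverseFft);
    // Release the interleaved scratch buffer allocated with fastNewArray.
    WTF::fastDeleteArray(m_complexData);
}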
static void
gst_spectrum_alloc_channel_data (GstSpectrum * spectrum)
{
  gint i;
  GstSpectrumChannel *cd;
  guint bands = spectrum->bands;
  guint nfft = 2 * bands - 2;   /* a real FFT of length nfft yields nfft / 2 + 1 == bands complex bins */

  g_assert (spectrum->channel_data == NULL);

  spectrum->num_channels = (spectrum->multi_channel) ?
      GST_AUDIO_FILTER (spectrum)->format.channels : 1;

  GST_DEBUG_OBJECT (spectrum, "allocating data for %d channels",
      spectrum->num_channels);

  spectrum->channel_data = g_new (GstSpectrumChannel, spectrum->num_channels);
  for (i = 0; i < spectrum->num_channels; i++) {
    cd = &spectrum->channel_data[i];
    cd->fft_ctx = gst_fft_f32_new (nfft, FALSE);
    cd->input = g_new0 (gfloat, nfft);
    cd->input_tmp = g_new0 (gfloat, nfft);
    cd->freqdata = g_new0 (GstFFTF32Complex, bands);
    cd->spect_magnitude = g_new0 (gfloat, bands);
    cd->spect_phase = g_new0 (gfloat, bands);
  }
}
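The element's teardown path is not part of this excerpt; a minimal sketch of the corresponding free routine, assuming it simply mirrors the allocator above:

static void
gst_spectrum_free_channel_data (GstSpectrum * spectrum)
{
  if (spectrum->channel_data) {
    gint i;

    for (i = 0; i < spectrum->num_channels; i++) {
      GstSpectrumChannel *cd = &spectrum->channel_data[i];

      gst_fft_f32_free (cd->fft_ctx);
      g_free (cd->input);
      g_free (cd->input_tmp);
      g_free (cd->freqdata);
      g_free (cd->spect_magnitude);
      g_free (cd->spect_phase);
    }
    g_free (spectrum->channel_data);
    spectrum->channel_data = NULL;
  }
}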
RecurAudioBinner *
recur_audio_binner_new(int window_size, int window_type,
    int n_bins,
    float min_freq,
    float max_freq,
    float knee_freq,
    float focus_freq,
    float audio_rate,
    float scale,
    int value_size /*1 for real, 2 for complex*/
){
  RecurAudioBinner *ab = calloc(1, sizeof(*ab));
  ab->window_size = window_size;
  ab->window_type = window_type;
  ab->n_bins = n_bins;
  ab->pcm_data = malloc_aligned_or_die((window_size + 2) * sizeof(float));
  ab->freq_data = malloc_aligned_or_die((window_size + 2) * sizeof(float));
  ab->fft = gst_fft_f32_new(window_size, FALSE);

  float *mask = malloc_aligned_or_die((window_size + 2) * sizeof(float));
  recur_window_init(mask, window_size, window_type, scale);
  ab->mask = mask;

  ab->value_size = value_size;
  ab->slopes = recur_bin_slopes_new(n_bins,
      window_size / value_size,
      min_freq,
      max_freq,
      knee_freq,
      focus_freq,
      audio_rate
  );
  mfcc_slopes_dump(ab);
  ab->fft_bins = malloc_aligned_or_die((n_bins + 3) * sizeof(float));
  ab->dct_bins = malloc_aligned_or_die((n_bins + 2) * sizeof(float));
  mfcc_slopes_dump2(ab);
  return ab;
}
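The constructor alone never runs a transform. The following hypothetical helper (the name and the windowing loop are assumptions; only the buffer sizes come from the code above) illustrates how the pieces line up: the (window_size + 2) floats of freq_data are exactly the window_size / 2 + 1 GstFFTF32Complex values that gst_fft_f32_fft() writes.

/* Hypothetical usage sketch; not part of the recur sources shown above. */
static void
audio_binner_transform (RecurAudioBinner *ab, const float *samples)
{
  int i;
  /* Apply the precomputed window mask into the aligned PCM buffer. */
  for (i = 0; i < ab->window_size; i++)
    ab->pcm_data[i] = samples[i] * ab->mask[i];
  /* (window_size + 2) floats == window_size / 2 + 1 complex bins. */
  gst_fft_f32_fft (ab->fft, ab->pcm_data,
      (GstFFTF32Complex *) ab->freq_data);
}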
void
_bp_vis_pipeline_setup (BansheePlayer *player)
{
    // The basic pipeline we're constructing is:
    // .audiotee ! queue ! audioresample ! audioconvert ! fakesink

    GstElement *fakesink, *converter, *resampler, *audiosinkqueue;
    GstCaps *caps;
    GstPad *teepad;
    GstPad *pad;

    player->vis_buffer = NULL;
    player->vis_fft = gst_fft_f32_new (SLICE_SIZE * 2, FALSE);
    player->vis_fft_buffer = g_new (GstFFTF32Complex, SLICE_SIZE + 1);
    player->vis_fft_sample_buffer = g_new0 (gfloat, SLICE_SIZE);
    
    // Core elements; if something fails here, it's the end of the world
    audiosinkqueue = gst_element_factory_make ("queue", "vis-queue");

    pad = gst_element_get_static_pad (audiosinkqueue, "sink");
    gst_pad_add_event_probe (pad, G_CALLBACK (_bp_vis_pipeline_event_probe), player);
    gst_object_unref (GST_OBJECT (pad));

    resampler = gst_element_factory_make ("audioresample", "vis-resample");
    converter = gst_element_factory_make ("audioconvert", "vis-convert");
    fakesink = gst_element_factory_make ("fakesink", "vis-sink");

    if (audiosinkqueue == NULL || resampler == NULL || converter == NULL || fakesink == NULL) {
        bp_debug ("Could not construct visualization pipeline, a fundamental element could not be created");
        return;
    }

    // Keep around the 5 most recent seconds of audio so that when resuming
    // visualization we have something to show right away.
    g_object_set (G_OBJECT (audiosinkqueue),
            "leaky", 2,
            "max-size-buffers", 0,
            "max-size-bytes", 0,
            "max-size-time", GST_SECOND * 5,
            NULL);
    
    g_signal_connect (G_OBJECT (fakesink), "handoff", G_CALLBACK (bp_vis_pcm_handoff), player);

    g_object_set (G_OBJECT (fakesink),
            // This enables the handoff signal.
            "signal-handoffs", TRUE,
            // Synchronize so we see vis at the same time as we hear it.
            "sync", TRUE,
            // Drop buffers if they come in too late.  This is mainly used when
            // thawing the vis pipeline.
            "max-lateness", GST_SECOND / 120,
            // Deliver buffers one frame early.  This allows for rendering
            // time.  (TODO: It would be great to calculate this on-the-fly so
            // we match the rendering time.)
            "ts-offset", -GST_SECOND / 60,
            // Don't go to PAUSED when we freeze the pipeline.
            "async", FALSE, NULL);
    
    gst_bin_add_many (GST_BIN (player->audiobin), audiosinkqueue, resampler,
                      converter, fakesink, NULL);
    
    pad = gst_element_get_static_pad (audiosinkqueue, "sink");
    teepad = gst_element_get_request_pad (player->audiotee, "src%d");
    gst_pad_link (teepad, pad);
    gst_object_unref (GST_OBJECT (teepad));
    gst_object_unref (GST_OBJECT (pad));
    
    gst_element_link_many (audiosinkqueue, resampler, converter, NULL);
    
    caps = gst_static_caps_get (&vis_data_sink_caps);
    gst_element_link_filtered (converter, fakesink, caps);
    gst_caps_unref (caps);
    
    player->vis_buffer = gst_adapter_new ();
    player->vis_resampler = resampler;
    player->vis_thawing = FALSE;
    player->vis_enabled = FALSE;

    // Disable the pipeline till we hear otherwise from managed land.
    _bp_vis_pipeline_set_blocked (player, TRUE);
}
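The handoff callback itself is outside this excerpt. A rough sketch of the transform step it would perform once a full slice has been gathered (the helper name is an assumption; the sizes follow from the vis_fft and vis_fft_buffer allocations at the top of _bp_vis_pipeline_setup):

/* Hypothetical sketch; the real bp_vis_pcm_handoff also does adapter
 * bookkeeping before handing spectrum data to managed land. */
static void
bp_vis_transform_slice (BansheePlayer *player, const gfloat *samples,
    gfloat *spectrum /* SLICE_SIZE magnitudes out */)
{
    gint i;

    // SLICE_SIZE * 2 time-domain samples in, SLICE_SIZE + 1 complex bins out.
    gst_fft_f32_fft (player->vis_fft, samples, player->vis_fft_buffer);

    // Skip the DC bin and scale the magnitudes down for display.
    for (i = 0; i < SLICE_SIZE; i++) {
        GstFFTF32Complex c = player->vis_fft_buffer[i + 1];
        spectrum[i] = sqrtf (c.r * c.r + c.i * c.i) / SLICE_SIZE;
    }
}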