Code example #1
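WebKit's GStreamer audio backend decodes a file into an AudioBus synchronously: createBus() spins up a private GMainContext and GMainLoop, schedules a zero-delay timeout source that kicks off the decoding pipeline once the loop is running, blocks in g_main_loop_run() until decoding completes or fails, and finally copies the accumulated GstBuffer lists into the bus channels (one channel when mixing to mono, two otherwise).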
PassRefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    m_sampleRate = sampleRate;

    m_frontLeftBuffers = gst_buffer_list_new();
    m_frontRightBuffers = gst_buffer_list_new();

    GRefPtr<GMainContext> context = adoptGRef(g_main_context_new());
    g_main_context_push_thread_default(context.get());
    m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE));

    // Start the pipeline processing just after the loop is started.
    GRefPtr<GSource> timeoutSource = adoptGRef(g_timeout_source_new(0));
    g_source_attach(timeoutSource.get(), context.get());
    g_source_set_callback(timeoutSource.get(), reinterpret_cast<GSourceFunc>(enteredMainLoopCallback), this, 0);

    // Block here until the decoding callbacks quit the loop (on EOS or error).
    g_main_loop_run(m_loop.get());
    g_main_context_pop_thread_default(context.get());

    if (m_errorOccurred)
        return 0;

    unsigned channels = mixToMono ? 1 : 2;
    RefPtr<AudioBus> audioBus = AudioBus::create(channels, m_channelSize, true);
    audioBus->setSampleRate(m_sampleRate);

    copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0));
    if (!mixToMono)
        copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1));

    return audioBus;
}
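For context, createBus() is not called directly by WebCore clients; it is wrapped by free-function entry points. A minimal sketch of what those wrappers plausibly look like (function names taken from WebCore's AudioFileReader interface, constructor signatures assumed, and details may differ between WebKit revisions):

// Sketch of the assumed entry points that drive createBus(); the
// AudioFileReader constructors used here are hypothetical.
PassRefPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
{
    return AudioFileReader(filePath).createBus(sampleRate, mixToMono);
}

PassRefPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
    return AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono);
}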
Code example #2
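A later revision of the same function. The channel count is now kept in a member (m_channels), the hand-rolled timeout source is replaced by WTF's GThreadSafeMainLoopSource, and the pipeline is explicitly torn down to GST_STATE_NULL as soon as the loop exits.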
PassRefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    m_sampleRate = sampleRate;
    m_channels = mixToMono ? 1 : 2;

    m_frontLeftBuffers = gst_buffer_list_new();
    m_frontRightBuffers = gst_buffer_list_new();

    GRefPtr<GMainContext> context = adoptGRef(g_main_context_new());
    g_main_context_push_thread_default(context.get());
    m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE));

    // Start the pipeline processing just after the loop is started.
    GThreadSafeMainLoopSource source;
    source.schedule("[WebKit] AudioFileReader::decodeAudioForBusCreation", std::function<void()>(std::bind(&AudioFileReader::decodeAudioForBusCreation, this)), G_PRIORITY_DEFAULT, nullptr, context.get());

    g_main_loop_run(m_loop.get());
    g_main_context_pop_thread_default(context.get());

    // Move the pipeline to GST_STATE_NULL as early as possible so that any
    // resources it may still hold are released.
    gst_element_set_state(m_pipeline, GST_STATE_NULL);

    if (m_errorOccurred)
        return 0;

    RefPtr<AudioBus> audioBus = AudioBus::create(m_channels, m_channelSize, true);
    audioBus->setSampleRate(m_sampleRate);

    copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0));
    if (!mixToMono)
        copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1));

    return audioBus;
}
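Scheduling decodeAudioForBusCreation() through GThreadSafeMainLoopSource names the dispatch for debugging and lets the source manage its own lifetime, removing the manual g_timeout_source_new()/g_source_attach()/g_source_set_callback() choreography of example #1.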
Code example #3
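The oldest of the three revisions. It still supports GStreamer 0.10, whose buffer lists are filled through an iterator API (the #ifndef GST_API_VERSION_1 block below), and it hands the bus back through the since-removed PassOwnPtr/OwnPtr smart pointers rather than PassRefPtr.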
PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    m_sampleRate = sampleRate;

    m_frontLeftBuffers = gst_buffer_list_new();
    m_frontRightBuffers = gst_buffer_list_new();

#ifndef GST_API_VERSION_1
    m_frontLeftBuffersIterator = gst_buffer_list_iterate(m_frontLeftBuffers);
    gst_buffer_list_iterator_add_group(m_frontLeftBuffersIterator);

    m_frontRightBuffersIterator = gst_buffer_list_iterate(m_frontRightBuffers);
    gst_buffer_list_iterator_add_group(m_frontRightBuffersIterator);
#endif

    GRefPtr<GMainContext> context = adoptGRef(g_main_context_new());
    g_main_context_push_thread_default(context.get());
    m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE));

    // Start the pipeline processing just after the loop is started.
    GRefPtr<GSource> timeoutSource = adoptGRef(g_timeout_source_new(0));
    g_source_attach(timeoutSource.get(), context.get());
    g_source_set_callback(timeoutSource.get(), reinterpret_cast<GSourceFunc>(enteredMainLoopCallback), this, 0);

    g_main_loop_run(m_loop.get());
    g_main_context_pop_thread_default(context.get());

    if (m_errorOccurred)
        return nullptr;

    unsigned channels = mixToMono ? 1 : 2;
    OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(channels, m_channelSize, true));
    audioBus->setSampleRate(m_sampleRate);

    copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0));
    if (!mixToMono)
        copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1));

    return audioBus.release();
}
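All three revisions finish by handing the collected buffer lists to copyGstreamerBuffersToAudioChannel(). A hypothetical reconstruction of that helper against the GStreamer 1.0 API, to show what the copy amounts to (the actual WebKit implementation may differ in detail):

// Hypothetical reconstruction, assuming GStreamer 1.0 buffer mapping and
// that the pipeline produced deinterleaved 32-bit float samples.
static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChannel* audioChannel)
{
    float* destination = audioChannel->mutableData();
    unsigned bufferCount = gst_buffer_list_length(buffers);
    for (unsigned i = 0; i < bufferCount; ++i) {
        GstBuffer* buffer = gst_buffer_list_get(buffers, i);
        if (!buffer)
            continue;
        // Map the buffer read-only and append its samples to the channel.
        GstMapInfo info;
        gst_buffer_map(buffer, &info, GST_MAP_READ);
        memcpy(destination, info.data, info.size);
        destination += info.size / sizeof(float);
        gst_buffer_unmap(buffer, &info);
    }
}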