QImage MltPreview::getFrame(Mlt::Producer *producer, int framepos, int /*width*/, int height)
{
    QImage result;
    if (producer == NULL) {
        return result;
    }

    producer->seek(framepos);
    Mlt::Frame *frame = producer->get_frame();
    if (frame == NULL) {
        return result;
    }

    mlt_image_format format = mlt_image_rgb24a;
    // Fixed thumbnail height; the width is derived from the frame's aspect ratio.
    height = 200;
    double ar = frame->get_double("aspect_ratio");
    if (ar == 0.0) {
        ar = 1.33;
    }
    int calculated_width = (int)((double) height * ar);
    uint8_t *data = frame->get_image(format, calculated_width, height, 0);
    QImage image((uchar *)data, calculated_width, height, QImage::Format_ARGB32);
    if (!image.isNull()) {
        // rgbSwapped() + convertToFormat() deep-copy the pixels, so the image
        // stays valid after the MLT frame (which owns the buffer) is deleted.
        result = image.rgbSwapped().convertToFormat(QImage::Format_RGB32);
    }
    delete frame;
    return result;
}
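// --- Usage sketch (not from the original source) ---
// A minimal illustration of how getFrame() might be driven. The clip path is
// hypothetical and "preview" stands in for an MltPreview instance; only the
// MLT calls (Profile, Producer, is_valid) are real API.
void exampleThumbnail(MltPreview *preview)
{
    Mlt::Profile profile;                                  // default profile
    Mlt::Producer producer(profile, "/path/to/clip.mp4");  // hypothetical path
    if (producer.is_valid()) {
        // Width is ignored and height is currently forced to 200 inside getFrame().
        QImage thumb = preview->getFrame(&producer, 0 /*framepos*/, 0, 200);
        Q_UNUSED(thumb);
    }
}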
void AudioPeakMeterScopeWidget::refreshScope(const QSize& /*size*/, bool /*full*/)
{
    SharedFrame sFrame;
    while (m_queue.count() > 0) {
        sFrame = m_queue.pop();
        if (sFrame.is_valid() && sFrame.get_audio_samples() > 0) {
            mlt_audio_format format = mlt_audio_s16;
            int channels = sFrame.get_audio_channels();
            int frequency = sFrame.get_audio_frequency();
            int samples = sFrame.get_audio_samples();
            // Clone the audio only, then run the level filter over the clone.
            Mlt::Frame mFrame = sFrame.clone(true, false, false);
            m_filter->process(mFrame);
            mFrame.get_audio(format, frequency, channels, samples);
            QVector<double> levels;
            for (int i = 0; i < channels; i++) {
                QString s = QString("meta.media.audio_level.%1").arg(i);
                double audioLevel = mFrame.get_double(s.toLatin1().constData());
                if (audioLevel == 0.0) {
                    levels << -100.0;
                } else {
                    levels << 20 * log10(audioLevel);
                }
            }
            QMetaObject::invokeMethod(m_audioMeter, "showAudio", Qt::QueuedConnection,
                                      Q_ARG(const QVector<double>&, levels));
        }
    }
}
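// --- Filter setup sketch (assumption, not shown in this excerpt) ---
// The meta.media.audio_level.N properties read above are produced by MLT's
// "audiolevel" filter, so m_filter is presumably created along these lines,
// with m_profile a long-lived Mlt::Profile member:
//
//     m_filter = new Mlt::Filter(m_profile, "audiolevel");
//     if (!m_filter->is_valid()) {
//         // the audiolevel module is missing; disable the scope
//     }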
void MonitorAudioLevel::refreshScope(const QSize& /*size*/, bool /*full*/)
{
    SharedFrame sFrame;
    while (m_queue.count() > 0) {
        sFrame = m_queue.pop();
        if (sFrame.is_valid() && sFrame.get_audio_samples() > 0) {
            mlt_audio_format format = mlt_audio_s16;
            int channels = sFrame.get_audio_channels();
            int frequency = sFrame.get_audio_frequency();
            int samples = sFrame.get_audio_samples();
            Mlt::Frame mFrame = sFrame.clone(true, false, false);
            m_filter->process(mFrame);
            mFrame.get_audio(format, frequency, channels, samples);
            if (samples == 0) {
                // There was an error processing audio from the frame.
                continue;
            }
            QVector<int> levels;
            for (int i = 0; i < channels; i++) {
                QString s = QString("meta.media.audio_level.%1").arg(i);
                double audioLevel = mFrame.get_double(s.toLatin1().constData());
                if (audioLevel == 0.0) {
                    levels << -100;
                } else {
                    levels << (int) levelToDB(audioLevel);
                }
            }
            QMetaObject::invokeMethod(this, "setAudioValues", Qt::QueuedConnection,
                                      Q_ARG(const QVector<int>&, levels));
        }
    }
}
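// --- Sketch of the levelToDB() helper referenced above ---
// Its definition is not part of this excerpt; a minimal version consistent
// with the peak-meter scope (which applies 20 * log10 directly) would be:
#include <cmath>

static inline double levelToDB(double level)
{
    if (level <= 0.0) {
        return -100.0; // same floor as the sentinel value used above
    }
    return 20.0 * std::log10(level);
}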
void AudioGraphSpectrum::processSpectrum(const SharedFrame &frame)
{
    if (!isVisible())
        return;
    mlt_audio_format format = mlt_audio_s16;
    int channels = frame.get_audio_channels();
    int frequency = frame.get_audio_frequency();
    int samples = frame.get_audio_samples();
    Mlt::Frame mFrame = frame.clone(true, false, false);
    m_filter->process(mFrame);
    mFrame.get_audio(format, frequency, channels, samples);

    QVector<double> bands(AUDIBLE_BAND_COUNT);
    float *bins = (float*)m_filter->get_data("bins");
    int bin_count = m_filter->get_int("bin_count");
    double bin_width = m_filter->get_double("bin_width");

    int band = 0;
    bool firstBandFound = false;
    for (int bin = 0; bin < bin_count; bin++) {
        // Loop through all the FFT bins and align bin frequencies with
        // band frequencies.
        double F = bin_width * (double)bin;
        if (!firstBandFound) {
            // Skip bins that come before the first band.
            if (BAND_TAB[band + FIRST_AUDIBLE_BAND_INDEX].low > F) {
                continue;
            } else {
                firstBandFound = true;
                bands[band] = bins[bin];
            }
        } else if (BAND_TAB[band + FIRST_AUDIBLE_BAND_INDEX].high < F) {
            // This bin is outside of this band - move to the next band.
            band++;
            if ((band + FIRST_AUDIBLE_BAND_INDEX) > LAST_AUDIBLE_BAND_INDEX) {
                // Skip bins that come after the last band.
                break;
            }
            bands[band] = bins[bin];
        } else if (bands[band] < bins[bin]) {
            // Pick the highest bin level within this band to represent the
            // whole band.
            bands[band] = bins[bin];
        }
    }

    // At this point, bands contains the magnitude of the signal for each
    // band. Convert to dB.
    for (band = 0; band < bands.size(); band++) {
        double mag = bands[band];
        double dB = mag > 0.0 ? 20 * log10(mag) : -1000.0;
        bands[band] = dB;
    }

    // Update the audio signal widget.
    QMetaObject::invokeMethod(m_graphWidget, "showAudio", Qt::QueuedConnection,
                              Q_ARG(const QVector<double>&, bands));
}
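// --- Assumed shape of BAND_TAB and the FFT filter (not in this excerpt) ---
// The loop above only needs each band's low/high edges, so the table entries
// presumably look something like this (field names are assumptions):
struct Band {
    float low;    // lower band edge in Hz
    float center; // preferred/center frequency in Hz
    float high;   // upper band edge in Hz
};

// m_filter is presumably MLT's "fft" filter, which publishes the "bins",
// "bin_count" and "bin_width" properties read above, e.g.:
//
//     m_filter = new Mlt::Filter(m_profile, "fft");
//     m_filter->set("window_size", windowSize); // hypothetical size variable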
void AudioEnvelope::loadEnvelope()
{
    Q_ASSERT(m_envelope == NULL);
    std::cout << "Loading envelope ..." << std::endl;

    int samplingRate = m_info->info(0)->samplingRate();
    mlt_audio_format format_s16 = mlt_audio_s16;
    int channels = 1;
    Mlt::Frame *frame;
    int64_t position;
    int samples;

    m_envelope = new int64_t[m_envelopeSize];
    m_envelopeMax = 0;
    m_envelopeMean = 0;

    QTime t;
    t.start();
    int count = 0;
    m_producer->seek(m_offset);
    m_producer->set_speed(1.0); // This is necessary, otherwise we don't get any new frames in the 2nd run.
    for (int i = 0; i < m_envelopeSize; i++) {
        frame = m_producer->get_frame(i);
        position = mlt_frame_get_position(frame->get_frame());
        samples = mlt_sample_calculator(m_producer->get_fps(), samplingRate, position);

        int16_t *data = static_cast<int16_t*>(frame->get_audio(format_s16, samplingRate, channels, samples));

        // Sum of absolute sample values: a cheap per-frame loudness measure.
        int64_t sum = 0;
        for (int k = 0; k < samples; k++) {
            sum += fabs(data[k]);
        }
        m_envelope[i] = sum;

        m_envelopeMean += sum;
        if (sum > m_envelopeMax) {
            m_envelopeMax = sum;
        }

        // std::cout << position << "|" << m_producer->get_playtime()
        //           << "-" << m_producer->get_in() << "+" << m_producer->get_out() << " ";

        delete frame;
        count++;
        if (m_length > 0 && count > m_length) {
            break;
        }
    }
    m_envelopeMean /= m_envelopeSize;
    std::cout << "Calculating the envelope (" << m_envelopeSize << " frames) took "
              << t.elapsed() << " ms." << std::endl;
}
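// --- Hypothetical follow-up (assumption, not from the original source) ---
// Once loaded, the envelope can be scaled against m_envelopeMax for drawing;
// a helper of this shape would do (name and placement are illustrative):
double AudioEnvelope::normalizedValue(int i) const
{
    if (m_envelopeMax == 0) {
        return 0.0; // silent clip: avoid division by zero
    }
    return double(m_envelope[i]) / double(m_envelopeMax);
}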