Example #1
DECLARE_TEST(atomic, cas) {
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);
	size_t ithread;
	thread_t threads[32];
	cas_value_t cas_values[32];

	for (ithread = 0; ithread < num_threads; ++ithread) {
		cas_values[ithread].val_32 = (int32_t)ithread;
		cas_values[ithread].val_64 = (int64_t)ithread;
		cas_values[ithread].val_ptr = (void*)(uintptr_t)ithread;
		thread_initialize(&threads[ithread], cas_thread, &cas_values[ithread],
		                  STRING_CONST("cas"), THREAD_PRIORITY_NORMAL, 0);
	}
	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_start(&threads[ithread]);

	test_wait_for_threads_startup(threads, num_threads);
	test_wait_for_threads_finish(threads, num_threads);

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_finalize(&threads[ithread]);

	EXPECT_EQ(atomic_load32(&val_32), 0);
	EXPECT_EQ(atomic_load64(&val_64), 0);
	EXPECT_EQ(atomic_loadptr(&val_ptr), 0);

	return 0;
}
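Every test above sizes its worker pool with math_clamp(system_hardware_threads() * 4, 4, 32) so the count always fits the fixed 32-element arrays. As a rough illustration only (this is not the foundation_lib implementation, and clamp_value is a hypothetical name), a clamp with the same (value, min, max) argument order can be sketched as:

// Hypothetical sketch of a clamp helper; not the foundation_lib math_clamp itself.
template <typename T>
static T clamp_value(T value, T lo, T hi) {
    // Values below the range map to lo, values above it to hi,
    // and values already inside [lo, hi] pass through unchanged.
    return value < lo ? lo : (value > hi ? hi : value);
}

// Usage mirroring the tests: keep the thread count within the array bounds.
// size_t num_threads = clamp_value<size_t>(system_hardware_threads() * 4, 4, 32);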
Example #2
void WaveformWidgetFactory::setFrameRate(int frameRate) {
    m_frameRate = math_clamp(frameRate, 1, 120);
    if (m_config) {
        m_config->set(ConfigKey("[Waveform]","FrameRate"), ConfigValue(m_frameRate));
    }
    m_vsyncThread->setSyncIntervalTimeMicros(1e6 / m_frameRate);
}
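setSyncIntervalTimeMicros(1e6 / m_frameRate) converts the clamped frame rate into a per-frame interval in microseconds. A quick worked example: at 60 fps the interval is 1e6 / 60 ≈ 16667 µs, and across the clamp range it spans from 1e6 / 120 ≈ 8333 µs up to 1e6 / 1 = 1000000 µs.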
Example #3
DECLARE_TEST(objectmap, thread) {
	objectmap_t* map;
	thread_t thread[32];
	size_t ith;
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);

	map = objectmap_allocate((num_threads + 1) * 512);

	for (ith = 0; ith < num_threads; ++ith)
		thread_initialize(&thread[ith], objectmap_thread, map, STRING_CONST("objectmap_thread"),
		                  THREAD_PRIORITY_NORMAL, 0);
	for (ith = 0; ith < num_threads; ++ith)
		thread_start(&thread[ith]);

	test_wait_for_threads_startup(thread, num_threads);
	test_wait_for_threads_finish(thread, num_threads);

	for (ith = 0; ith < num_threads; ++ith)
		EXPECT_EQ(thread[ith].result, 0);

	for (ith = 0; ith < num_threads; ++ith)
		thread_finalize(&thread[ith]);

	objectmap_deallocate(map);

	return 0;
}
Example #4
void BitCrusherEffect::processGroup(const QString& group,
                                    BitCrusherGroupState* pState,
                                    const CSAMPLE* pInput, CSAMPLE* pOutput,
                                    const unsigned int numSamples,
                                    const unsigned int sampleRate,
                                    const EffectProcessor::EnableState enableState,
                                    const GroupFeatureState& groupFeatures) {
    Q_UNUSED(group);
    Q_UNUSED(groupFeatures);
    Q_UNUSED(sampleRate); // we are normalized to 1
    Q_UNUSED(enableState); // no need to ramp, it is just a bitcrusher ;-)

    const CSAMPLE downsample = m_pDownsampleParameter ?
            m_pDownsampleParameter->value() : 0.0;

    CSAMPLE bit_depth = m_pBitDepthParameter ?
            m_pBitDepthParameter->value() : 16;

    // divided by two because we use float math, which includes the sign bit anyway
    const CSAMPLE scale = pow(2.0f, bit_depth) / 2;
    // Gain correction is required because the MSB (values above 0.5) is rarely
    // used; it is applied to achieve equal loudness and maximum dynamic range
    const CSAMPLE gainCorrection = (17 - bit_depth) / 8;

    const int kChannels = 2;
    for (unsigned int i = 0; i < numSamples; i += kChannels) {
        pState->accumulator += downsample;

        if (pState->accumulator >= 1.0) {
            pState->accumulator -= 1.0;
            if (bit_depth < 16) {

                pState->hold_l = floorf(math_clamp(pInput[i] * gainCorrection, -1.0f, 1.0f) * scale + 0.5f) / scale / gainCorrection;
                pState->hold_r = floorf(math_clamp(pInput[i+1] * gainCorrection, -1.0f, 1.0f) * scale + 0.5f) / scale / gainCorrection;
            } else {
                // Mixxx float has 24 bit depth, Audio CDs are 16 bit
                // here we do not change the depth
                pState->hold_l = pInput[i];
                pState->hold_r = pInput[i+1];
            }
        }

        pOutput[i] = pState->hold_l;
        pOutput[i+1] = pState->hold_r;
    }
}
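The floorf(... * scale + 0.5f) / scale round trip is the actual bit reduction. A small hedged sketch of the same quantization applied to a single sample (quantize_sample is a hypothetical helper, not part of the effect; the constants mirror the code above):

#include <algorithm>
#include <cmath>

// Hypothetical helper reproducing the hold_l / hold_r math for one sample.
static float quantize_sample(float in, float bitDepth) {
    const float scale = std::pow(2.0f, bitDepth) / 2.0f;     // e.g. 128 levels at 8 bits
    const float gainCorrection = (17.0f - bitDepth) / 8.0f;  // e.g. 1.125 at 8 bits
    const float clamped = std::min(1.0f, std::max(-1.0f, in * gainCorrection));
    // Round to the nearest quantization level, then undo the scale and gain.
    return std::floor(clamped * scale + 0.5f) / scale / gainCorrection;
}

// For example, quantize_sample(0.3f, 8.0f) boosts 0.3 to 0.3375, maps it to
// level 43 of 128, and returns 43 / 128 / 1.125 ≈ 0.2986.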
Example #5
void WaveformWidgetFactory::setDefaultZoom(int zoom) {
    m_defaultZoom = math_clamp(zoom, WaveformWidgetRenderer::s_waveformMinZoom,
                               WaveformWidgetRenderer::s_waveformMaxZoom);
    if (m_config) {
        m_config->set(ConfigKey("[Waveform]","DefaultZoom"), ConfigValue(m_defaultZoom));
    }

    for (int i = 0; i < m_waveformWidgetHolders.size(); i++) {
        m_waveformWidgetHolders[i].m_waveformViewer->setZoom(m_defaultZoom);
    }
}
Example #6
void WVuMeter::onConnectedControlChanged(double dParameter, double dValue) {
    Q_UNUSED(dValue);
    m_dParameter = math_clamp(dParameter, 0.0, 1.0);

    if (dParameter > 0.0) {
        setPeak(dParameter);
    } else {
        // A 0.0 value is very unlikely except when the VU Meter is disabled
        m_dPeakParameter = 0;
    }

    updateState(m_timer.restart());
}
Example #7
void EffectChainSlot::slotControlChainSuperParameter(double v, bool force) {
    //qDebug() << debugString() << "slotControlChainSuperParameter" << v;

    // Clamp to [0.0, 1.0]
    if (v < 0.0 || v > 1.0) {
        qWarning() << debugString() << "value out of limits";
        v = math_clamp(v, 0.0, 1.0);
        m_pControlChainSuperParameter->set(v);
    }
    for (const auto& pSlot : m_slots) {
        pSlot->setMetaParameter(v, force);
    }
}
Example #8
void EffectChainSlot::slotControlChainMix(double v) {
    //qDebug() << debugString() << "slotControlChainMix" << v;

    // Clamp to [0.0, 1.0]
    if (v < 0.0 || v > 1.0) {
        qWarning() << debugString() << "value out of limits";
        v = math_clamp(v, 0.0, 1.0);
        m_pControlChainMix->set(v);
    }
    if (m_pEffectChain) {
        m_pEffectChain->setMix(v);
    }
}
Example #9
int EngineFilterBessel8Low::setFrequencyCornersForIntDelay(
        double desiredCorner1Ratio, int maxDelay) {
    // these values are calculated using the phase returned by
    // fid_response_pha() at corner / 20

    // group delay at 1 Hz freqCorner1 and 1 Hz Samplerate
    const double kDelayFactor1 = 0.506051799;
    // Factor, required to hit the end of the quadratic curve
    const double kDelayFactor2 = 1.661247;
    // Table for the non quadratic, high part near the sample rate
    const double delayRatioTable[] = {
            0.500000000,  // delay 0
            0.321399282,  // delay 1
            0.213843537,  // delay 2
            0.155141284,  // delay 3
            0.120432232,  // delay 4
            0.097999886,  // delay 5
            0.082451739,  // delay 6
            0.071098408,  // delay 7
            0.062444910,  // delay 8
            0.055665936,  // delay 9
            0.050197933,  // delay 10
            0.045689120,  // delay 11
            0.041927420,  // delay 12
            0.038735202,  // delay 13
            0.035992756,  // delay 14
            0.033611618,  // delay 15
            0.031525020,  // delay 16
            0.029681641,  // delay 17
            0.028041409,  // delay 18
            0.026572562,  // delay 19
    };


    double dDelay = kDelayFactor1 / desiredCorner1Ratio - kDelayFactor2 * desiredCorner1Ratio;
    int iDelay =  math_clamp((int)(dDelay + 0.5), 0, maxDelay);

    double quantizedRatio;
    if (iDelay >= (int)(sizeof(delayRatioTable) / sizeof(double))) {
        // pq formula, only valid for low frequencies
        quantizedRatio = (-(iDelay / kDelayFactor2 / 2)) +
                sqrt((iDelay / kDelayFactor2 / 2)*(iDelay / kDelayFactor2 / 2)
                                       + kDelayFactor1 / kDelayFactor2);
    } else {
        quantizedRatio = delayRatioTable[iDelay];
    }

    setCoefs("LpBe8", 1, quantizedRatio);
    return iDelay;
}
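The "pq formula" branch is the delay estimate inverted: dDelay is computed as d = k1 / r - k2 * r, and solving k2 * r^2 + d * r - k1 = 0 for the positive root gives r = -d / (2 * k2) + sqrt((d / (2 * k2))^2 + k1 / k2), which is exactly the expression in the code. A standalone sketch (illustrative only) that checks the round trip with the same constants:

#include <cmath>
#include <cstdio>

int main() {
    const double k1 = 0.506051799;  // kDelayFactor1
    const double k2 = 1.661247;     // kDelayFactor2
    const double d = 25.0;          // an integer delay beyond the 20-entry table (illustrative)
    // Positive root of k2 * r^2 + d * r - k1 = 0.
    const double r = -(d / k2 / 2) + std::sqrt((d / k2 / 2) * (d / k2 / 2) + k1 / k2);
    // Plugging r back into the forward formula reproduces d (up to rounding).
    std::printf("d = %f, k1 / r - k2 * r = %f\n", d, k1 / r - k2 * r);
    return 0;
}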
Example #10
void WVuMeter::updateState(double msecsElapsed) {
    // If we're holding at a peak then don't update anything
    m_dPeakHoldCountdownMs -= msecsElapsed;
    if (m_dPeakHoldCountdownMs > 0) {
        return;
    } else {
        m_dPeakHoldCountdownMs = 0;
    }

    // Otherwise, decrement the peak position by the fall step size times the
    // milliseconds elapsed over the fall time multiplier. The peak will fall
    // FallStep times (out of 128 steps) every FallTime milliseconds.
    m_dPeakParameter -= static_cast<double>(m_iPeakFallStep) *
            msecsElapsed /
            static_cast<double>(m_iPeakFallTime * m_iPixmapLength);
    m_dPeakParameter = math_clamp(m_dPeakParameter, 0.0, 1.0);
}
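The decrement is FallStep * msecsElapsed / (FallTime * PixmapLength). Purely illustrative numbers (these parameter values are assumptions, not Mixxx defaults): with m_iPeakFallStep = 1, m_iPeakFallTime = 20 and m_iPixmapLength = 128, an update arriving after 20 ms lowers the peak by 1 * 20 / (20 * 128) = 1/128, i.e. one step out of 128 per FallTime, which is what the comment above describes.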
Example #11
DECLARE_TEST( uuid, threaded )
{
	object_t thread[32];
	int ith, i, jth, j;
	int num_threads = math_clamp( system_hardware_threads() + 1, 3, 32 );

	for( ith = 0; ith < num_threads; ++ith )
	{
		thread[ith] = thread_create( uuid_thread_time, "uuid_thread", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], (void*)(uintptr_t)ith );
	}

	test_wait_for_threads_startup( thread, num_threads );
	
	for( ith = 0; ith < num_threads; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
	}

	test_wait_for_threads_exit( thread, num_threads );

	for( ith = 0; ith < num_threads; ++ith )
	{
		for( i = 0; i < 8192; ++i )
		{
			for( jth = ith + 1; jth < num_threads; ++jth )
			{
				for( j = 0; j < 8192; ++j )
				{
					EXPECT_FALSE( uuid_equal( uuid_thread_store[ith][i], uuid_thread_store[jth][j] ) );
				}
			}
			for( j = i + 1; j < 8192; ++j )
			{
				EXPECT_FALSE( uuid_equal( uuid_thread_store[ith][i], uuid_thread_store[ith][j] ) );
			}
		}
	}
	
	return 0;
}
Example #12
DECLARE_TEST(atomic, add) {
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);
	size_t ithread;
	thread_t threads[32];

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_initialize(&threads[ithread], add_thread, 0,
		                  STRING_CONST("add"), THREAD_PRIORITY_NORMAL, 0);
	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_start(&threads[ithread]);

	test_wait_for_threads_startup(threads, num_threads);
	test_wait_for_threads_finish(threads, num_threads);

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_finalize(&threads[ithread]);

	EXPECT_EQ(atomic_load32(&val_32), 0);
	EXPECT_EQ(atomic_load64(&val_64), 0);

	return 0;
}
Example #13
int CachingReader::read(int sample, int num_samples, CSAMPLE* buffer) {
    // Check for bad inputs
    if (sample % 2 != 0 || num_samples < 0 || !buffer) {
        QString temp = QString("Sample = %1").arg(sample);
        qDebug() << "CachingReader::read() invalid arguments sample:" << sample
                 << "num_samples:" << num_samples << "buffer:" << buffer;
        return 0;
    }

    // If asked to read 0 samples, don't do anything (this is a perfectly
    // reasonable request that happens sometimes). If no track is loaded, don't
    // do anything.
    if (num_samples == 0 || m_readerStatus != TRACK_LOADED) {
        return 0;
    }

    // Process messages from the reader thread.
    process();

    // TODO: is it possible to move this code out of caching reader
    // and into enginebuffer?  It doesn't quite make sense here, although
    // it makes preroll completely transparent to the rest of the code

    //if we're in preroll...
    int zerosWritten = 0;
    if (sample < 0) {
        if (sample + num_samples <= 0) {
            //everything is zeros, easy
            memset(buffer, 0, sizeof(*buffer) * num_samples);
            return num_samples;
        } else {
            //some of the buffer is zeros, some is from the file
            memset(buffer, 0, sizeof(*buffer) * (0 - sample));
            buffer += (0 - sample);
            num_samples = sample + num_samples;
            zerosWritten = (0 - sample);
            sample = 0;
            //continue processing the rest of the chunks normally
        }
    }

    int start_sample = math_min(m_iTrackNumSamplesCallbackSafe,
                                sample);
    int start_chunk = chunkForSample(start_sample);
    int end_sample = math_min(m_iTrackNumSamplesCallbackSafe,
                              sample + num_samples - 1);
    int end_chunk = chunkForSample(end_sample);

    int samples_remaining = num_samples;
    int current_sample = sample;

    // Sanity checks
    if (start_chunk > end_chunk) {
        qDebug() << "CachingReader::read() bad chunk range to read ["
                 << start_chunk << end_chunk << "]";
        return 0;
    }

    for (int chunk_num = start_chunk; chunk_num <= end_chunk; chunk_num++) {
        Chunk* current = lookupChunk(chunk_num);

        // If the chunk is not in cache, then we must return an error.
        if (current == NULL) {
            // qDebug() << "Couldn't get chunk " << chunk_num
            //          << " in read() of [" << sample << "," << sample + num_samples
            //          << "] chunks " << start_chunk << "-" << end_chunk;

            // Something is wrong. Break out of the loop, that should fill the
            // samples requested with zeroes.
            Counter("CachingReader::read cache miss")++;
            break;
        }

        int chunk_start_sample = CachingReaderWorker::sampleForChunk(chunk_num);
        int chunk_offset = current_sample - chunk_start_sample;
        int chunk_remaining_samples = current->length - chunk_offset;

        // More sanity checks
        if (current_sample < chunk_start_sample || current_sample % 2 != 0) {
            qDebug() << "CachingReader::read() bad chunk parameters"
                     << "chunk_start_sample" << chunk_start_sample
                     << "current_sample" << current_sample;
            break;
        }

        // If we're past the start_chunk then current_sample should be
        // chunk_start_sample.
        if (start_chunk != chunk_num && chunk_start_sample != current_sample) {
            qDebug() << "CachingReader::read() bad chunk parameters"
                     << "chunk_num" << chunk_num
                     << "start_chunk" << start_chunk
                     << "chunk_start_sample" << chunk_start_sample
                     << "current_sample" << current_sample;
            break;
        }

        if (samples_remaining < 0) {
            qDebug() << "CachingReader::read() bad samples remaining"
                     << samples_remaining;
            break;
        }

        // It is completely possible that chunk_remaining_samples is less than
        // zero. If the caller is trying to read from beyond the end of the
        // file, then this can happen. We should tolerate it.
        if (chunk_remaining_samples < 0) {
            chunk_remaining_samples = 0;
        }
        int samples_to_read = math_clamp(samples_remaining, 0,
                                         chunk_remaining_samples);

        // If we did not decide to read any samples from this chunk then that
        // means we have exhausted all the samples in the song.
        if (samples_to_read == 0) {
            break;
        }

        // samples_to_read should be non-negative and even
        if (samples_to_read < 0 || samples_to_read % 2 != 0) {
            qDebug() << "CachingReader::read() samples_to_read invalid"
                     << samples_to_read;
            break;
        }

        // TODO(rryan) do a test and see if using memcpy is faster than gcc
        // optimizing the for loop
        CSAMPLE *data = current->data + chunk_offset;
        memcpy(buffer, data, sizeof(*buffer) * samples_to_read);
        // for (int i=0; i < samples_to_read; i++) {
        //     buffer[i] = data[i];
        // }

        buffer += samples_to_read;
        current_sample += samples_to_read;
        samples_remaining -= samples_to_read;
    }

    // If we didn't supply all the samples requested, that probably means we're
    // at the end of the file, or something is wrong. Provide zeroes and pretend
    // all is well. The caller can't be bothered to check how long the file is.
    // TODO(XXX) memset
    for (int i=0; i<samples_remaining; i++) {
        buffer[i] = 0.0f;
    }
    samples_remaining = 0;

    // if (samples_remaining != 0) {
    //     qDebug() << "CachingReader::read() did not read all requested samples.";
    // }
    return zerosWritten + num_samples - samples_remaining;
}
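The preroll branch splits a read that starts before sample 0 into a zero prefix plus a normal read. An illustrative trace: with sample = -10 and num_samples = 30, the first test fails because -10 + 30 > 0, so 10 zeros are written, buffer is advanced by 10, num_samples becomes 20, zerosWritten = 10 and sample = 0; the chunk loop then serves the remaining 20 samples from the start of the track, and the return value zerosWritten + num_samples - samples_remaining counts both parts.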
Example #14

void QtWaveformRendererSimpleSignal::draw(QPainter* painter, QPaintEvent* /*event*/) {

    TrackPointer pTrack = m_waveformRenderer->getTrackInfo();
    if (!pTrack) {
        return;
    }

    ConstWaveformPointer waveform = pTrack->getWaveform();
    if (waveform.isNull()) {
        return;
    }

    const int dataSize = waveform->getDataSize();
    if (dataSize <= 1) {
        return;
    }

    const WaveformData* data = waveform->data();
    if (data == NULL) {
        return;
    }

    painter->save();

    painter->setRenderHint(QPainter::Antialiasing);
    painter->resetTransform();

    float allGain(1.0);
    getGains(&allGain, NULL, NULL, NULL);

    double heightGain = allGain * (double)m_waveformRenderer->getHeight()/255.0;
    if (m_alignment == Qt::AlignTop) {
        painter->translate(0.0, 0.0);
        painter->scale(1.0, heightGain);
    } else if (m_alignment == Qt::AlignBottom) {
        painter->translate(0.0, m_waveformRenderer->getHeight());
        painter->scale(1.0, heightGain);
    } else {
        painter->translate(0.0, m_waveformRenderer->getHeight()/2.0);
        painter->scale(1.0, 0.5*heightGain);
    }

    //draw reference line
    if (m_alignment == Qt::AlignCenter) {
        painter->setPen(m_pColors->getAxesColor());
        painter->drawLine(0,0,m_waveformRenderer->getWidth(),0);
    }

    const double firstVisualIndex = m_waveformRenderer->getFirstDisplayedPosition() * dataSize;
    const double lastVisualIndex = m_waveformRenderer->getLastDisplayedPosition() * dataSize;
    m_polygon.clear();
    m_polygon.reserve(2 * m_waveformRenderer->getWidth() + 2);
    m_polygon.append(QPointF(0.0, 0.0));

    const double offset = firstVisualIndex;

    // Represents the # of waveform data points per horizontal pixel.
    const double gain = (lastVisualIndex - firstVisualIndex) /
            (double)m_waveformRenderer->getWidth();

    // NOTE(vrince): Please help me find a better name for "channelSeparation";
    // this variable stands for merged vs. separated channels: 1 = merged, 2 = separated
    int channelSeparation = 2;
    if (m_alignment != Qt::AlignCenter)
        channelSeparation = 1;

    for (int channel = 0; channel < channelSeparation; ++channel) {
        int startPixel = 0;
        int endPixel = m_waveformRenderer->getWidth() - 1;
        int delta = 1;
        double direction = 1.0;

        //Reverse display for merged bottom channel
        if (m_alignment == Qt::AlignBottom)
            direction = -1.0;

        if (channel == 1) {
            startPixel = m_waveformRenderer->getWidth() - 1;
            endPixel = 0;
            delta = -1;
            direction = -1.0;

            // After preparing the first channel, insert the pivot point.
            m_polygon.append(QPointF(m_waveformRenderer->getWidth(), 0.0));
        }

        for (int x = startPixel;
                (startPixel < endPixel) ? (x <= endPixel) : (x >= endPixel);
                x += delta) {

            // TODO(rryan) remove before 1.11 release. I'm seeing crashes
            // sometimes where the pointIndex is very very large. It hasn't come
            // back since adding locking, but I'm leaving this so that we can
            // get some info about it before crashing. (The crash usually
            // corrupts a lot of the stack).
            if (m_polygon.size() > 2 * m_waveformRenderer->getWidth() + 2) {
                qDebug() << "OUT OF CONTROL"
                         << 2 * m_waveformRenderer->getWidth() + 2
                         << dataSize
                         << channel << m_polygon.size() << x;
            }

            // Width of the x position in visual indices.
            const double xSampleWidth = gain * x;

            // Effective visual index of x
            const double xVisualSampleIndex = xSampleWidth + offset;

            // Our current pixel (x) corresponds to a number of visual samples
            // (visualSamplerPerPixel) in our waveform object. We take the max of
            // all the data points on either side of xVisualSampleIndex within a
            // window of 'maxSamplingRange' visual samples to measure the maximum
            // data point contained by this pixel.
            double maxSamplingRange = gain / 2.0;

            // Since xVisualSampleIndex is in visual-samples (e.g. R,L,R,L) we want
            // to check +/- maxSamplingRange frames, not samples. To do this, divide
            // xVisualSampleIndex by 2. Since frames indices are integers, we round
            // to the nearest integer by adding 0.5 before casting to int.
            int visualFrameStart = int(xVisualSampleIndex / 2.0 - maxSamplingRange + 0.5);
            int visualFrameStop = int(xVisualSampleIndex / 2.0 + maxSamplingRange + 0.5);

            // If the entire sample range is off the screen then don't calculate a
            // point for this pixel.
            const int lastVisualFrame = dataSize / 2 - 1;
            if (visualFrameStop < 0 || visualFrameStart > lastVisualFrame) {
                m_polygon.append(QPointF(x, 0.0));
                continue;
            }

            // We now know that some subset of [visualFrameStart,
            // visualFrameStop] lies within the valid range of visual
            // frames. Clamp visualFrameStart/Stop to within [0,
            // lastVisualFrame].
            visualFrameStart = math_clamp(visualFrameStart, 0, lastVisualFrame);
            visualFrameStop = math_clamp(visualFrameStop, 0, lastVisualFrame);

            int visualIndexStart = visualFrameStart * 2 + channel;
            int visualIndexStop = visualFrameStop * 2 + channel;

            // if (x == m_waveformRenderer->getWidth() / 2) {
            //     qDebug() << "audioVisualRatio" << waveform->getAudioVisualRatio();
            //     qDebug() << "visualSampleRate" << waveform->getVisualSampleRate();
            //     qDebug() << "audioSamplesPerVisualPixel" << waveform->getAudioSamplesPerVisualSample();
            //     qDebug() << "visualSamplePerPixel" << visualSamplePerPixel;
            //     qDebug() << "xSampleWidth" << xSampleWidth;
            //     qDebug() << "xVisualSampleIndex" << xVisualSampleIndex;
            //     qDebug() << "maxSamplingRange" << maxSamplingRange;;
            //     qDebug() << "Sampling pixel " << x << "over [" << visualIndexStart << visualIndexStop << "]";
            // }

            unsigned char maxAll = 0;

            for (int i = visualIndexStart; i >= 0 && i < dataSize && i <= visualIndexStop;
                 i += channelSeparation) {
                const WaveformData& waveformData = *(data + i);
                unsigned char all = waveformData.filtered.all;
                maxAll = math_max(maxAll, all);
            }

            m_polygon.append(QPointF(x, (float)maxAll * direction));
        }
    }

    // If channels are not displayed separately we need to close the loop properly
    if (channelSeparation == 1) {
        m_polygon.append(QPointF(m_waveformRenderer->getWidth(), 0.0));
    }

    painter->setPen(m_borderPen);
    painter->setBrush(m_brush);

    painter->drawPolygon(&m_polygon[0], m_polygon.size());

    painter->restore();
}
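Every renderer in these examples maps a pixel column x to a window of visual frames with the same arithmetic: gain visual samples per pixel, a window of +/- gain / 2 around xVisualSampleIndex, and a divide-by-two to turn interleaved sample indices into frame indices. A hedged sketch of that mapping in isolation (frame_window_for_pixel is a hypothetical helper, not part of the renderer classes):

#include <algorithm>
#include <utility>

// Hypothetical helper: the [start, stop] visual-frame window for one pixel
// column, mirroring the renderer math above.
static std::pair<int, int> frame_window_for_pixel(
        int x, double firstVisualIndex, double lastVisualIndex,
        int widthInPixels, int dataSize) {
    const double gain = (lastVisualIndex - firstVisualIndex) / widthInPixels;
    const double xVisualSampleIndex = gain * x + firstVisualIndex;
    const double maxSamplingRange = gain / 2.0;
    // Samples are interleaved (L,R,L,R,...), so divide by 2 for frame indices,
    // rounding to the nearest integer with the same +0.5 trick as above.
    int start = int(xVisualSampleIndex / 2.0 - maxSamplingRange + 0.5);
    int stop = int(xVisualSampleIndex / 2.0 + maxSamplingRange + 0.5);
    const int lastVisualFrame = dataSize / 2 - 1;
    start = std::max(0, std::min(start, lastVisualFrame));
    stop = std::max(0, std::min(stop, lastVisualFrame));
    return {start, stop};
}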
Example #15
void WaveformRendererHSV::draw(QPainter* painter,
                                          QPaintEvent* /*event*/) {
    const TrackPointer trackInfo = m_waveformRenderer->getTrackInfo();
    if (!trackInfo) {
        return;
    }

    ConstWaveformPointer waveform = trackInfo->getWaveform();
    if (waveform.isNull()) {
        return;
    }

    const int dataSize = waveform->getDataSize();
    if (dataSize <= 1) {
        return;
    }

    const WaveformData* data = waveform->data();
    if (data == NULL) {
        return;
    }

    painter->save();
    painter->setRenderHints(QPainter::Antialiasing, false);
    painter->setRenderHints(QPainter::HighQualityAntialiasing, false);
    painter->setRenderHints(QPainter::SmoothPixmapTransform, false);
    painter->setWorldMatrixEnabled(false);
    painter->resetTransform();

    // Rotate if drawing vertical waveforms
    if (m_waveformRenderer->getOrientation() == Qt::Vertical) {
        painter->setTransform(QTransform(0, 1, 1, 0, 0, 0));
    }

    const double firstVisualIndex = m_waveformRenderer->getFirstDisplayedPosition() * dataSize;
    const double lastVisualIndex = m_waveformRenderer->getLastDisplayedPosition() * dataSize;

    const double offset = firstVisualIndex;

    // Represents the # of waveform data points per horizontal pixel.
    const double gain = (lastVisualIndex - firstVisualIndex) /
            (double)m_waveformRenderer->getLength();

    float allGain(1.0);
    getGains(&allGain, NULL, NULL, NULL);

    // Save HSV of waveform color. NOTE(rryan): On ARM, qreal is float so it's
    // important we use qreal here and not double or float or else we will get
    // build failures on ARM.
    qreal h, s, v;

    // Get the base color of the waveform in HSV format (s and v aren't used)
    m_pColors->getLowColor().getHsvF(&h, &s, &v);

    QColor color;
    float lo, hi, total;

    const int breadth = m_waveformRenderer->getBreadth();
    const float halfBreadth = (float)breadth / 2.0;

    const float heightFactor = allGain * halfBreadth / 255.0;

    //draw reference line
    painter->setPen(m_pColors->getAxesColor());
    painter->drawLine(0, halfBreadth, m_waveformRenderer->getLength(), halfBreadth);

    for (int x = 0; x < m_waveformRenderer->getLength(); ++x) {
        // Width of the x position in visual indices.
        const double xSampleWidth = gain * x;

        // Effective visual index of x
        const double xVisualSampleIndex = xSampleWidth + offset;

        // Our current pixel (x) corresponds to a number of visual samples
        // (visualSamplerPerPixel) in our waveform object. We take the max of
        // all the data points on either side of xVisualSampleIndex within a
        // window of 'maxSamplingRange' visual samples to measure the maximum
        // data point contained by this pixel.
        double maxSamplingRange = gain / 2.0;

        // Since xVisualSampleIndex is in visual-samples (e.g. R,L,R,L) we want
        // to check +/- maxSamplingRange frames, not samples. To do this, divide
        // xVisualSampleIndex by 2. Since frames indices are integers, we round
        // to the nearest integer by adding 0.5 before casting to int.
        int visualFrameStart = int(xVisualSampleIndex / 2.0 - maxSamplingRange + 0.5);
        int visualFrameStop = int(xVisualSampleIndex / 2.0 + maxSamplingRange + 0.5);
        const int lastVisualFrame = dataSize / 2 - 1;

        // We now know that some subset of [visualFrameStart, visualFrameStop]
        // lies within the valid range of visual frames. Clamp
        // visualFrameStart/Stop to within [0, lastVisualFrame].
        visualFrameStart = math_clamp(visualFrameStart, 0, lastVisualFrame);
        visualFrameStop = math_clamp(visualFrameStop, 0, lastVisualFrame);

        int visualIndexStart = visualFrameStart * 2;
        int visualIndexStop = visualFrameStop * 2;

        int maxLow[2] = {0, 0};
        int maxHigh[2] = {0, 0};
        int maxMid[2] = {0, 0};
        int maxAll[2] = {0, 0};

        for (int i = visualIndexStart;
             i >= 0 && i + 1 < dataSize && i + 1 <= visualIndexStop; i += 2) {
            const WaveformData& waveformData = *(data + i);
            const WaveformData& waveformDataNext = *(data + i + 1);
            maxLow[0] = math_max(maxLow[0], (int)waveformData.filtered.low);
            maxLow[1] = math_max(maxLow[1], (int)waveformDataNext.filtered.low);
            maxMid[0] = math_max(maxMid[0], (int)waveformData.filtered.mid);
            maxMid[1] = math_max(maxMid[1], (int)waveformDataNext.filtered.mid);
            maxHigh[0] = math_max(maxHigh[0], (int)waveformData.filtered.high);
            maxHigh[1] = math_max(maxHigh[1], (int)waveformDataNext.filtered.high);
            maxAll[0] = math_max(maxAll[0], (int)waveformData.filtered.all);
            maxAll[1] = math_max(maxAll[1], (int)waveformDataNext.filtered.all);
        }

        if (maxAll[0] && maxAll[1]) {
            // Calculate the sum, to normalize.
            // Also multiply by 1.2 to prevent very dark or very light colors
            total = (maxLow[0] + maxLow[1] + maxMid[0] + maxMid[1] + maxHigh[0] + maxHigh[1]) * 1.2;

            // prevent division by zero
            if (total > 0)
            {
                // Normalize low and high (mid is not needed because it does not change the color)
                lo = (maxLow[0] + maxLow[1]) / total;
                hi = (maxHigh[0] + maxHigh[1]) / total;
            }
            else
                lo = hi = 0.0;

            // Set color
            color.setHsvF(h, 1.0-hi, 1.0-lo);

            painter->setPen(color);
            switch (m_alignment) {
                case Qt::AlignBottom :
                case Qt::AlignRight :
                    painter->drawLine(
                        x, breadth,
                        x, breadth - (int)(heightFactor * (float)math_max(maxAll[0],maxAll[1])));
                    break;
                case Qt::AlignTop :
                case Qt::AlignLeft :
                    painter->drawLine(
                        x, 0,
                        x, (int)(heightFactor * (float)math_max(maxAll[0],maxAll[1])));
                    break;
                default :
                    painter->drawLine(
                        x, (int)(halfBreadth - heightFactor * (float)maxAll[0]),
                        x, (int)(halfBreadth + heightFactor * (float)maxAll[1]));
            }
        }
    }

    painter->restore();
}
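The column color comes straight from the band balance. Worked numbers (illustrative only): if the two low maxima sum to 100, the mids to 50 and the highs to 30, then total = (100 + 50 + 30) * 1.2 = 216, lo = 100 / 216 ≈ 0.46 and hi = 30 / 216 ≈ 0.14, so color.setHsvF(h, 1.0 - hi, 1.0 - lo) yields saturation ≈ 0.86 and value ≈ 0.54: bass-heavy columns render darker and more saturated, treble-heavy columns lighter and less saturated.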
Example #16
void FlangerEffect::processChannel(const ChannelHandle& handle,
                                   FlangerGroupState* pState,
                                   const CSAMPLE* pInput, CSAMPLE* pOutput,
                                   const mixxx::EngineParameters& bufferParameters,
                                   const EffectEnableState enableState,
                                   const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    double lfoPeriodParameter = m_pSpeedParameter->value();
    double lfoPeriodFrames;
    if (groupFeatures.has_beat_length_sec) {
        // lfoPeriodParameter is a number of beats
        lfoPeriodParameter = std::max(roundToFraction(lfoPeriodParameter, 2.0), kMinLfoBeats);
        if (m_pTripletParameter->toBool()) {
            lfoPeriodParameter /= 3.0;
        }
        lfoPeriodFrames = lfoPeriodParameter * groupFeatures.beat_length_sec
                * bufferParameters.sampleRate();
    } else {
        // lfoPeriodParameter is a number of seconds
        lfoPeriodFrames = std::max(lfoPeriodParameter, kMinLfoBeats)
                * bufferParameters.sampleRate();
    }

    // When the period is changed, the position of the sound shouldn't change,
    // so the time needs to be recalculated
    if (pState->previousPeriodFrames != -1.0) {
        pState->lfoFrames *= lfoPeriodFrames / pState->previousPeriodFrames;
    }
    pState->previousPeriodFrames = lfoPeriodFrames;


    // lfoPeriodFrames is used to calculate the delay for each channel
    // independently in the loop below, so do not multiply lfoPeriodFrames by
    // the number of channels.

    CSAMPLE_GAIN mix = m_pMixParameter->value();
    RampingValue<CSAMPLE_GAIN> mixRamped(
            pState->prev_mix, mix, bufferParameters.framesPerBuffer());
    pState->prev_mix = mix;

    CSAMPLE_GAIN regen = m_pRegenParameter->value();
    RampingValue<CSAMPLE_GAIN> regenRamped(
            pState->prev_regen, regen, bufferParameters.framesPerBuffer());
    pState->prev_regen = regen;

    // Width and Manual are limited by the amount of amplitude that remains from
    // width to kMaxDelayMs
    double width = m_pWidthParameter->value();
    double manual = m_pManualParameter->value();
    double maxManual = kCenterDelayMs + (kMaxLfoWidthMs - width) / 2;
    double minManual = kCenterDelayMs - (kMaxLfoWidthMs - width) / 2;
    manual = math_clamp(manual, minManual, maxManual);

    RampingValue<double> widthRamped(
            pState->prev_width, width, bufferParameters.framesPerBuffer());
    pState->prev_width = width;

    RampingValue<double> manualRamped(
            pState->prev_manual, manual, bufferParameters.framesPerBuffer());
    pState->prev_manual = manual;

    CSAMPLE* delayLeft = pState->delayLeft;
    CSAMPLE* delayRight = pState->delayRight;

   for (unsigned int i = 0;
          i < bufferParameters.samplesPerBuffer();
          i += bufferParameters.channelCount()) {

        CSAMPLE_GAIN mix_ramped = mixRamped.getNext();
        CSAMPLE_GAIN regen_ramped = regenRamped.getNext();
        double width_ramped = widthRamped.getNext();
        double manual_ramped = manualRamped.getNext();

        pState->lfoFrames++;
        if (pState->lfoFrames >= lfoPeriodFrames) {
            pState->lfoFrames = 0;
        }

        float periodFraction = static_cast<float>(pState->lfoFrames) / lfoPeriodFrames;
        double delayMs = manual_ramped + width_ramped / 2 * sin(M_PI * 2.0f * periodFraction);
        double delayFrames = delayMs * bufferParameters.sampleRate() / 1000;

        SINT framePrev = (pState->delayPos - static_cast<SINT>(floor(delayFrames))
                + kBufferLenth) % kBufferLenth;
        SINT frameNext = (pState->delayPos - static_cast<SINT>(ceil(delayFrames))
                + kBufferLenth) % kBufferLenth;
        CSAMPLE prevLeft = delayLeft[framePrev];
        CSAMPLE nextLeft = delayLeft[frameNext];

        CSAMPLE prevRight = delayRight[framePrev];
        CSAMPLE nextRight = delayRight[frameNext];

        CSAMPLE frac = delayFrames - floorf(delayFrames);
        CSAMPLE delayedSampleLeft = prevLeft + frac * (nextLeft - prevLeft);
        CSAMPLE delayedSampleRight = prevRight + frac * (nextRight - prevRight);

        delayLeft[pState->delayPos] = tanh_approx(pInput[i] + regen_ramped * delayedSampleLeft);
        delayRight[pState->delayPos] = tanh_approx(pInput[i + 1] + regen_ramped * delayedSampleRight);

        pState->delayPos = (pState->delayPos + 1) % kBufferLenth;

        double gain = (1 - mix_ramped + kGainCorrection * mix_ramped);
        pOutput[i] = (pInput[i] + mix_ramped * delayedSampleLeft) / gain;
        pOutput[i + 1] = (pInput[i + 1] + mix_ramped * delayedSampleRight) / gain;
    }

    if (enableState == EffectEnableState::Disabling) {
        SampleUtil::clear(delayLeft, kBufferLenth);
        SampleUtil::clear(delayRight, kBufferLenth);
        pState->previousPeriodFrames = -1;
        pState->prev_regen = 0;
        pState->prev_mix = 0;
    }
}
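The delayed sample is read with linear interpolation between the two integer frames surrounding delayFrames. A minimal standalone sketch of that fractional read (read_fractional is a hypothetical helper; the circular-buffer handling mirrors the framePrev / frameNext / frac math above):

#include <cmath>

// Hypothetical fractional-delay read from a circular delay line.
static float read_fractional(const float* delayLine, int length,
                             int writePos, double delayFrames) {
    const int prev = (writePos - static_cast<int>(std::floor(delayFrames)) + length) % length;
    const int next = (writePos - static_cast<int>(std::ceil(delayFrames)) + length) % length;
    const float frac = static_cast<float>(delayFrames - std::floor(delayFrames));
    // Linear interpolation between the two neighbouring delayed samples.
    return delayLine[prev] + frac * (delayLine[next] - delayLine[prev]);
}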
Example #17
void AutoPanEffect::processChannel(const ChannelHandle& handle, PanGroupState* pGroupState,
                              const CSAMPLE* pInput,
                              CSAMPLE* pOutput, const unsigned int numSamples,
                              const unsigned int sampleRate,
                              const EffectProcessor::EnableState enableState,
                              const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    if (enableState == EffectProcessor::DISABLED) {
        return;
    }

    PanGroupState& gs = *pGroupState;
    double width = m_pWidthParameter->value();
    double period = m_pPeriodParameter->value();
    double periodUnit = m_pPeriodUnitParameter->value();
    double smoothing = 0.5-m_pSmoothingParameter->value();

    // When the period knob is between max and max-1, the time is paused.
    // Time shouldn't be paused while in the enabling state, as the sound
    // would be stuck in the middle even if the smoothing is at max.
    bool timePaused = period > m_pPeriodParameter->maximum() - 1
            && enableState != EffectProcessor::ENABLING;

    if (periodUnit == 1 && groupFeatures.has_beat_length) {
        // floor the param to one of these values:
        // 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, 32, 64, 128

        int i = 0;
        while (period > m_pPeriodParameter->minimum()) {
            period /= 2;
            i++;
        }

        double beats = m_pPeriodParameter->minimum();
        while (i != 0.0) {
            beats *= 2;
            i--;
        }

        period = groupFeatures.beat_length * beats;
    } else {
        // max period is 128 seconds
        period *= sampleRate;
    }

    // When the period is changed, the position of the sound shouldn't change,
    // so the time needs to be recalculated
    if (gs.m_dPreviousPeriod != -1.0) {
        gs.time *= period / gs.m_dPreviousPeriod;
    }
    gs.m_dPreviousPeriod = period;


    if (gs.time > period || enableState == EffectProcessor::ENABLING) {
        gs.time = 0;
    }


    // Normally, the position goes from 0 to 1 linearly. Here we make steps at
    // 0.25 and 0.75 to have the sound fully on the right or fully on the left.
    // At the end, the "position" value can describe a sinusoid or a square
    // curve depending on the size of those steps.

    // coef of the slope
    // a = (y2 - y1) / (x2 - x1)
    //       1  / ( 1 - 2 * stepfrac)
    float a = smoothing != 0.5f ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;

    // size of a segment of slope (controlled by the "smoothing" parameter)
    float u = (0.5f - smoothing) / 2.0f;

    gs.frac.setRampingThreshold(kPositionRampingThreshold);

    double sinusoid = 0;

    for (unsigned int i = 0; i + 1 < numSamples; i += 2) {

        CSAMPLE periodFraction = CSAMPLE(gs.time) / period;

        // current quarter in the trigonometric circle
        float quarter = floorf(periodFraction * 4.0f);

        // part of the period fraction being a step (not in the slope)
        CSAMPLE stepsFractionPart = floorf((quarter + 1.0f) / 2.0f) * smoothing;

        // float inInterval = fmod( periodFraction, (period / 2.0) );
        float inStepInterval = fmod(periodFraction, 0.5f);

        CSAMPLE angleFraction;
        if (inStepInterval > u && inStepInterval < (u + smoothing)) {
            // at full left or full right
            angleFraction = quarter < 2.0f ? 0.25f : 0.75f;
        } else {
            // in the slope (linear function)
            angleFraction = (periodFraction - stepsFractionPart) * a;
        }

        // Transforms the angleFraction into a sinusoid.
        // The width parameter modulates the two limits. If width is 0.5,
        // the limits will be 0.25 and 0.75. If it's 0, they will be 0.5 and 0.5,
        // so the sound will be stuck at the center. If it is 1, the limits
        // will be 0 and 1 (full left and full right).
        sinusoid = sin(M_PI * 2.0f * angleFraction) * width;
        gs.frac.setWithRampingApplied((sinusoid + 1.0f) / 2.0f);

        // apply the delay
        gs.delay->process(&pInput[i], &pOutput[i],
                -0.005 * math_clamp(((gs.frac * 2.0) - 1.0f), -1.0, 1.0) * sampleRate);

        double lawCoef = computeLawCoefficient(sinusoid);
        pOutput[i] *= gs.frac * lawCoef;
        pOutput[i+1] *= (1.0f - gs.frac) * lawCoef;

        // The time shouldn't be paused if the position has not reached its
        // expected value due to ramping
        if (!timePaused || gs.frac.ramped) {
            gs.time++;
        }
    }
}
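The slope coefficient and step size follow directly from the smoothing parameter. Illustrative numbers: with m_pSmoothingParameter->value() = 0.25, smoothing = 0.5 - 0.25 = 0.25, so a = 1 / (1 - 2 * 0.25) = 2 and u = (0.5 - 0.25) / 2 = 0.125; within each half-period the pan is held at full left or full right while inStepInterval lies in (0.125, 0.375) and ramps twice as steeply as the unsmoothed case elsewhere.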
Example #18
void WaveformRendererRGB::draw(QPainter* painter,
                                          QPaintEvent* /*event*/) {
    const TrackPointer trackInfo = m_waveformRenderer->getTrackInfo();
    if (!trackInfo) {
        return;
    }

    const Waveform* waveform = trackInfo->getWaveform();
    if (waveform == NULL) {
        return;
    }

    const int dataSize = waveform->getDataSize();
    if (dataSize <= 1) {
        return;
    }

    const WaveformData* data = waveform->data();
    if (data == NULL) {
        return;
    }

    painter->save();
    painter->setRenderHints(QPainter::Antialiasing, false);
    painter->setRenderHints(QPainter::HighQualityAntialiasing, false);
    painter->setRenderHints(QPainter::SmoothPixmapTransform, false);
    painter->setWorldMatrixEnabled(false);
    painter->resetTransform();

    const double firstVisualIndex = m_waveformRenderer->getFirstDisplayedPosition() * dataSize;
    const double lastVisualIndex = m_waveformRenderer->getLastDisplayedPosition() * dataSize;

    const double offset = firstVisualIndex;

    // Represents the # of waveform data points per horizontal pixel.
    const double gain = (lastVisualIndex - firstVisualIndex) /
            (double)m_waveformRenderer->getWidth();

    float allGain(1.0);
    getGains(&allGain, NULL, NULL, NULL);

    QColor color;

    const float halfHeight = (float)m_waveformRenderer->getHeight()/2.0;

    const float heightFactor = allGain*halfHeight/255.0;

    // Draw reference line
    painter->setPen(m_pColors->getAxesColor());
    painter->drawLine(0,halfHeight,m_waveformRenderer->getWidth(),halfHeight);

    for (int x = 0; x < m_waveformRenderer->getWidth(); ++x) {
        // Width of the x position in visual indices.
        const double xSampleWidth = gain * x;

        // Effective visual index of x
        const double xVisualSampleIndex = xSampleWidth + offset;

        // Our current pixel (x) corresponds to a number of visual samples
        // (visualSamplerPerPixel) in our waveform object. We take the max of
        // all the data points on either side of xVisualSampleIndex within a
        // window of 'maxSamplingRange' visual samples to measure the maximum
        // data point contained by this pixel.
        double maxSamplingRange = gain / 2.0;

        // Since xVisualSampleIndex is in visual-samples (e.g. R,L,R,L) we want
        // to check +/- maxSamplingRange frames, not samples. To do this, divide
        // xVisualSampleIndex by 2. Since frames indices are integers, we round
        // to the nearest integer by adding 0.5 before casting to int.
        int visualFrameStart = int(xVisualSampleIndex / 2.0 - maxSamplingRange + 0.5);
        int visualFrameStop = int(xVisualSampleIndex / 2.0 + maxSamplingRange + 0.5);
        const int lastVisualFrame = dataSize / 2 - 1;

        // We now know that some subset of [visualFrameStart, visualFrameStop]
        // lies within the valid range of visual frames. Clamp
        // visualFrameStart/Stop to within [0, lastVisualFrame].
        visualFrameStart = math_clamp(visualFrameStart, 0, lastVisualFrame);
        visualFrameStop = math_clamp(visualFrameStop, 0, lastVisualFrame);

        int visualIndexStart = visualFrameStart * 2;
        int visualIndexStop  = visualFrameStop * 2;

        unsigned char maxLow  = 0;
        unsigned char maxMid  = 0;
        unsigned char maxHigh = 0;
        unsigned char maxAllA = 0;
        unsigned char maxAllB = 0;

        for (int i = visualIndexStart;
             i >= 0 && i + 1 < dataSize && i + 1 <= visualIndexStop; i += 2) {
            const WaveformData& waveformData = *(data + i);
            const WaveformData& waveformDataNext = *(data + i + 1);

            maxLow  = math_max3(maxLow,  waveformData.filtered.low,  waveformDataNext.filtered.low);
            maxMid  = math_max3(maxMid,  waveformData.filtered.mid,  waveformDataNext.filtered.mid);
            maxHigh = math_max3(maxHigh, waveformData.filtered.high, waveformDataNext.filtered.high);
            maxAllA = math_max(maxAllA, waveformData.filtered.all);
            maxAllB = math_max(maxAllB, waveformDataNext.filtered.all);
        }

        int red   = maxLow * m_lowColor.red()   + maxMid * m_midColor.red()   + maxHigh * m_highColor.red();
        int green = maxLow * m_lowColor.green() + maxMid * m_midColor.green() + maxHigh * m_highColor.green();
        int blue  = maxLow * m_lowColor.blue()  + maxMid * m_midColor.blue()  + maxHigh * m_highColor.blue();

        // Compute maximum (needed for value normalization)
        float max = (float) math_max3(red, green, blue);

        // Prevent division by zero
        if (max > 0.0f) {
            // Set color
            color.setRgbF(red / max, green / max, blue / max);

            painter->setPen(color);
            switch (m_alignment) {
                case Qt::AlignBottom :
                    painter->drawLine(
                        x, m_waveformRenderer->getHeight(),
                        x, m_waveformRenderer->getHeight() - (int)(heightFactor*(float)math_max(maxAllA,maxAllB)));
                    break;
                case Qt::AlignTop :
                    painter->drawLine(
                        x, 0,
                        x, (int)(heightFactor*(float)math_max(maxAllA,maxAllB)));
                    break;
                default :
                    painter->drawLine(
                        x, (int)(halfHeight-heightFactor*(float)maxAllA),
                        x, (int)(halfHeight+heightFactor*(float)maxAllB));
            }
        }
    }

    painter->restore();
}
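Each column's color is a band-weighted mix of the three configured colors, normalized so the strongest channel is 1.0. Illustrative arithmetic (the band colors are assumptions): with maxLow = 200, maxMid = 50, maxHigh = 10 and pure red/green/blue band colors, red = 200 * 255, green = 50 * 255 and blue = 10 * 255, so max = 200 * 255, color.setRgbF receives (1.0, 0.25, 0.05), and the column is drawn predominantly in the low-band color.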
Example #19

void WaveformRendererFilteredSignal::draw(QPainter* painter,
                                          QPaintEvent* /*event*/) {
    const TrackPointer trackInfo = m_waveformRenderer->getTrackInfo();
    if (!trackInfo) {
        return;
    }

    const Waveform* waveform = trackInfo->getWaveform();
    if (waveform == NULL) {
        return;
    }

    const int dataSize = waveform->getDataSize();
    if (dataSize <= 1) {
        return;
    }

    const WaveformData* data = waveform->data();
    if (data == NULL) {
        return;
    }

    painter->save();
    painter->setRenderHints(QPainter::Antialiasing, false);
    painter->setRenderHints(QPainter::HighQualityAntialiasing, false);
    painter->setRenderHints(QPainter::SmoothPixmapTransform, false);
    painter->setWorldMatrixEnabled(false);
    painter->resetTransform();

    const double firstVisualIndex = m_waveformRenderer->getFirstDisplayedPosition() * dataSize;
    const double lastVisualIndex = m_waveformRenderer->getLastDisplayedPosition() * dataSize;

    // Represents the # of waveform data points per horizontal pixel.
    const double gain = (lastVisualIndex - firstVisualIndex) /
            (double)m_waveformRenderer->getWidth();

    // Per-band gain from the EQ knobs.
    float allGain(1.0), lowGain(1.0), midGain(1.0), highGain(1.0);
    getGains(&allGain, &lowGain, &midGain, &highGain);

    const float halfHeight = (float)m_waveformRenderer->getHeight()/2.0;

    const float heightFactor = m_alignment == Qt::AlignCenter
            ? allGain*halfHeight/255.0
            : allGain*m_waveformRenderer->getHeight()/255.0;

    //draw reference line
    if (m_alignment == Qt::AlignCenter) {
        painter->setPen(m_pColors->getAxesColor());
        painter->drawLine(0,halfHeight,m_waveformRenderer->getWidth(),halfHeight);
    }

    int actualLowLineNumber = 0;
    int actualMidLineNumber = 0;
    int actualHighLineNumber = 0;

    for (int x = 0; x < m_waveformRenderer->getWidth(); ++x) {
        // Width of the x position in visual indices.
        const double xSampleWidth = gain * x;

        // Effective visual index of x
        const double xVisualSampleIndex = xSampleWidth + firstVisualIndex;

        // Our current pixel (x) corresponds to a number of visual samples
        // (visualSamplerPerPixel) in our waveform object. We take the max of
        // all the data points on either side of xVisualSampleIndex within a
        // window of 'maxSamplingRange' visual samples to measure the maximum
        // data point contained by this pixel.
        double maxSamplingRange = gain / 2.0;

        // Since xVisualSampleIndex is in visual-samples (e.g. R,L,R,L) we want
        // to check +/- maxSamplingRange frames, not samples. To do this, divide
        // xVisualSampleIndex by 2. Since frames indices are integers, we round
        // to the nearest integer by adding 0.5 before casting to int.
        int visualFrameStart = int(xVisualSampleIndex / 2.0 - maxSamplingRange + 0.5);
        int visualFrameStop = int(xVisualSampleIndex / 2.0 + maxSamplingRange + 0.5);

        // If the entire sample range is off the screen then don't calculate a
        // point for this pixel.
        const int lastVisualFrame = dataSize / 2 - 1;
        if (visualFrameStop < 0 || visualFrameStart > lastVisualFrame) {
            continue;
        }

        // We now know that some subset of [visualFrameStart, visualFrameStop]
        // lies within the valid range of visual frames. Clamp
        // visualFrameStart/Stop to within [0, lastVisualFrame].
        visualFrameStart = math_clamp(visualFrameStart, 0, lastVisualFrame);
        visualFrameStop = math_clamp(visualFrameStop, 0, lastVisualFrame);

        int visualIndexStart = visualFrameStart * 2;
        int visualIndexStop = visualFrameStop * 2;

        // if (x == m_waveformRenderer->getWidth() / 2) {
        //     qDebug() << "audioVisualRatio" << waveform->getAudioVisualRatio();
        //     qDebug() << "visualSampleRate" << waveform->getVisualSampleRate();
        //     qDebug() << "audioSamplesPerVisualPixel" << waveform->getAudioSamplesPerVisualSample();
        //     qDebug() << "visualSamplePerPixel" << visualSamplePerPixel;
        //     qDebug() << "xSampleWidth" << xSampleWidth;
        //     qDebug() << "xVisualSampleIndex" << xVisualSampleIndex;
        //     qDebug() << "maxSamplingRange" << maxSamplingRange;;
        //     qDebug() << "Sampling pixel " << x << "over [" << visualIndexStart << visualIndexStop << "]";
        // }

        unsigned char maxLow[2] = {0, 0};
        unsigned char maxMid[2] = {0, 0};
        unsigned char maxHigh[2] = {0, 0};

        for (int i = visualIndexStart;
             i >= 0 && i + 1 < dataSize && i + 1 <= visualIndexStop; i += 2) {
            const WaveformData& waveformData = *(data + i);
            const WaveformData& waveformDataNext = *(data + i + 1);
            maxLow[0] = math_max(maxLow[0], waveformData.filtered.low);
            maxLow[1] = math_max(maxLow[1], waveformDataNext.filtered.low);
            maxMid[0] = math_max(maxMid[0], waveformData.filtered.mid);
            maxMid[1] = math_max(maxMid[1], waveformDataNext.filtered.mid);
            maxHigh[0] = math_max(maxHigh[0], waveformData.filtered.high);
            maxHigh[1] = math_max(maxHigh[1], waveformDataNext.filtered.high);
        }

        if (maxLow[0] && maxLow[1]) {
            switch (m_alignment) {
                case Qt::AlignBottom :
                    m_lowLines[actualLowLineNumber].setLine(
                        x, m_waveformRenderer->getHeight(),
                        x, m_waveformRenderer->getHeight() - (int)(heightFactor*lowGain*(float)math_max(maxLow[0],maxLow[1])));
                    break;
                case Qt::AlignTop :
                    m_lowLines[actualLowLineNumber].setLine(
                        x, 0,
                        x, (int)(heightFactor*lowGain*(float)math_max(maxLow[0],maxLow[1])));
                    break;
                default :
                    m_lowLines[actualLowLineNumber].setLine(
                        x, (int)(halfHeight-heightFactor*(float)maxLow[0]*lowGain),
                        x, (int)(halfHeight+heightFactor*(float)maxLow[1]*lowGain));
                    break;
            }
            actualLowLineNumber++;
        }
        if (maxMid[0] && maxMid[1]) {
            switch (m_alignment) {
                case Qt::AlignBottom :
                    m_midLines[actualMidLineNumber].setLine(
                        x, m_waveformRenderer->getHeight(),
                        x, m_waveformRenderer->getHeight() - (int)(heightFactor*midGain*(float)math_max(maxMid[0],maxMid[1])));
                    break;
                case Qt::AlignTop :
                    m_midLines[actualMidLineNumber].setLine(
                        x, 0,
                        x, (int)(heightFactor*midGain*(float)math_max(maxMid[0],maxMid[1])));
                    break;
                default :
                    m_midLines[actualMidLineNumber].setLine(
                        x, (int)(halfHeight-heightFactor*(float)maxMid[0]*midGain),
                        x, (int)(halfHeight+heightFactor*(float)maxMid[1]*midGain));
                    break;
            }
            actualMidLineNumber++;
        }
        if (maxHigh[0] && maxHigh[1]) {
            switch (m_alignment) {
                case Qt::AlignBottom :
                    m_highLines[actualHighLineNumber].setLine(
                        x, m_waveformRenderer->getHeight(),
                        x, m_waveformRenderer->getHeight() - (int)(heightFactor*highGain*(float)math_max(maxHigh[0],maxHigh[1])));
                    break;
                case Qt::AlignTop :
                    m_highLines[actualHighLineNumber].setLine(
                        x, 0,
                        x, (int)(heightFactor*highGain*(float)math_max(maxHigh[0],maxHigh[1])));
                    break;
                default :
                    m_highLines[actualHighLineNumber].setLine(
                        x, (int)(halfHeight-heightFactor*(float)maxHigh[0]*highGain),
                        x, (int)(halfHeight+heightFactor*(float)maxHigh[1]*highGain));
                    break;
            }
            actualHighLineNumber++;
        }
    }

    painter->setPen(QPen(QBrush(m_pColors->getLowColor()), 1));
    if (m_pLowKillControlObject && m_pLowKillControlObject->get() == 0.0) {
       painter->drawLines(&m_lowLines[0], actualLowLineNumber);
    }
    painter->setPen(QPen(QBrush(m_pColors->getMidColor()), 1));
    if (m_pMidKillControlObject && m_pMidKillControlObject->get() == 0.0) {
        painter->drawLines(&m_midLines[0], actualMidLineNumber);
    }
    painter->setPen(QPen(QBrush(m_pColors->getHighColor()), 1));
    if (m_pHighKillControlObject && m_pHighKillControlObject->get() == 0.0) {
        painter->drawLines(&m_highLines[0], actualHighLineNumber);
    }

    painter->restore();
}
Example #20
int ReadAheadManager::getNextSamples(double dRate, CSAMPLE* buffer,
                                     int requested_samples) {
    if (!even(requested_samples)) {
        qDebug() << "ERROR: Non-even requested_samples to ReadAheadManager::getNextSamples";
        requested_samples--;
    }
    bool in_reverse = dRate < 0;
    int start_sample = m_iCurrentPosition;
    //qDebug() << "start" << start_sample << requested_samples;
    int samples_needed = requested_samples;
    CSAMPLE* base_buffer = buffer;

    // A loop will only limit the amount we can read in one shot.

    const double loop_trigger = m_pLoopingControl->nextTrigger(
            dRate, m_iCurrentPosition, 0, 0);
    bool loop_active = loop_trigger != kNoTrigger;
    int preloop_samples = 0;

    if (loop_active) {
        int samples_available = in_reverse ?
                m_iCurrentPosition - loop_trigger :
                loop_trigger - m_iCurrentPosition;
        if (samples_available < 0) {
            samples_needed = 0;
        } else {
            preloop_samples = samples_available;
            samples_needed = math_clamp(samples_needed, 0, samples_available);
        }
    }

    if (in_reverse) {
        start_sample = m_iCurrentPosition - samples_needed;
    }

    // Sanity checks.
    if (samples_needed < 0) {
        qDebug() << "Need negative samples in ReadAheadManager::getNextSamples. Ignoring read";
        return 0;
    }

    int samples_read = m_pReader->read(start_sample, in_reverse, samples_needed,
                                       base_buffer);

    if (samples_read != samples_needed) {
        qDebug() << "didn't get what we wanted" << samples_read << samples_needed;
    }

    // Increment or decrement current read-ahead position
    if (in_reverse) {
        addReadLogEntry(m_iCurrentPosition, m_iCurrentPosition - samples_read);
        m_iCurrentPosition -= samples_read;
    } else {
        addReadLogEntry(m_iCurrentPosition, m_iCurrentPosition + samples_read);
        m_iCurrentPosition += samples_read;
    }

    // Activate on this trigger if necessary
    if (loop_active) {
        // LoopingControl makes the decision about whether we should loop or
        // not.
        const double loop_target = m_pLoopingControl->
                process(dRate, m_iCurrentPosition, 0, 0);

        if (loop_target != kNoTrigger) {
            m_iCurrentPosition = loop_target;

            int loop_read_position = m_iCurrentPosition +
                    (in_reverse ? preloop_samples : -preloop_samples);

            int looping_samples_read = m_pReader->read(
                    loop_read_position, in_reverse, samples_read, m_pCrossFadeBuffer);

            if (looping_samples_read != samples_read) {
                qDebug() << "ERROR: Couldn't get all needed samples for crossfade.";
            }

            // do crossfade from the current buffer into the new loop beginning
            if (samples_read != 0) { // avoid division by zero
                SampleUtil::linearCrossfadeBuffers(base_buffer, base_buffer, m_pCrossFadeBuffer, samples_read);
            }
        }
    }

    //qDebug() << "read" << m_iCurrentPosition << samples_read;
    return samples_read;
}
Example #21
0
SINT ReadAheadManager::getNextSamples(double dRate, CSAMPLE* pOutput,
        SINT requested_samples) {
    // TODO(XXX): Remove implicit assumption of 2 channels
    if (!even(requested_samples)) {
        qDebug() << "ERROR: Non-even requested_samples to ReadAheadManager::getNextSamples";
        requested_samples--;
    }
    bool in_reverse = dRate < 0;

    //qDebug() << "start" << start_sample << requested_samples;


    // A loop will only limit the amount we can read in one shot.
    const double loop_trigger = m_pLoopingControl->nextTrigger(
            dRate, m_currentPosition, 0, 0);
    bool loop_active = loop_trigger != kNoTrigger;
    SINT preloop_samples = 0;
    double samplesToLoopTrigger = 0.0;

    SINT samples_from_reader = requested_samples;
    if (loop_active) {
        samplesToLoopTrigger = in_reverse ?
                m_currentPosition - loop_trigger :
                loop_trigger - m_currentPosition;
        if (samplesToLoopTrigger < 0) {
            // We have already passed the loop trigger
            samples_from_reader = 0;
        } else {
            // We can only read whole frames from the reader.
            // Use ceil here, to be sure to reach the loop trigger.
            preloop_samples = SampleUtil::ceilPlayPosToFrameStart(samplesToLoopTrigger,
                    kNumChannels);
            // clamp requested samples from the caller to the loop trigger point
            samples_from_reader = math_clamp(requested_samples,
                    static_cast<SINT>(0), preloop_samples);
        }
    }

    // Sanity checks.
    if (samples_from_reader < 0) {
        qDebug() << "Need negative samples in ReadAheadManager::getNextSamples. Ignoring read";
        return 0;
    }

    SINT start_sample = SampleUtil::roundPlayPosToFrameStart(
            m_currentPosition, kNumChannels);

    SINT samples_read = m_pReader->read(
            start_sample, samples_from_reader, in_reverse, pOutput);

    if (samples_read != samples_from_reader) {
        qDebug() << "didn't get what we wanted" << samples_read << samples_from_reader;
    }

    // Increment or decrement current read-ahead position
    // Mixing int and double here is intentional: the fractional part of the
    // frame position should be preserved.
    if (in_reverse) {
        addReadLogEntry(m_currentPosition, m_currentPosition - samples_read);
        m_currentPosition -= samples_read;
    } else {
        addReadLogEntry(m_currentPosition, m_currentPosition + samples_read);
        m_currentPosition += samples_read;
    }

    // Activate on this trigger if necessary
    if (loop_active) {
        // LoopingControl makes the decision about whether we should loop or
        // not.
        const double loop_target = m_pLoopingControl->process(
                dRate, m_currentPosition, 0, 0);

        if (loop_target != kNoTrigger) {
            m_currentPosition = loop_target;
            if (preloop_samples > 0) {
                // we are up to one frame ahead of the loop trigger
                double overshoot = preloop_samples - samplesToLoopTrigger;
                // start the loop later accordingly to be sure the loop length is as desired
                // e.g. exactly one bar.
                m_currentPosition += overshoot;

                // Example in frames:
                // loop start 1.1 loop end 3.3 loop length 2.2
                // m_currentPosition samplesToLoopTrigger preloop_samples
                // 2.0               1.3                  2
                // 1.8               1.5                  2
                // 1.6               1.7                  2
                // 1.4               1.9                  2
                // 1.2               2.1                  3
                // Average preloop_samples = 2.2
            }

            // start reading before the loop start point, so these samples can be
            // crossfaded with the samples already read up to the loop end
            int loop_read_position = SampleUtil::roundPlayPosToFrameStart(
                    m_currentPosition + (in_reverse ? preloop_samples : -preloop_samples), kNumChannels);

            int looping_samples_read = m_pReader->read(
                    loop_read_position, samples_read, in_reverse, m_pCrossFadeBuffer);

            if (looping_samples_read != samples_read) {
                qDebug() << "ERROR: Couldn't get all needed samples for crossfade.";
            }

            // do crossfade from the current buffer into the new loop beginning
            if (samples_read != 0) { // avoid division by zero
                SampleUtil::linearCrossfadeBuffers(pOutput, pOutput, m_pCrossFadeBuffer, samples_read);
            }
        }
    }

    //qDebug() << "read" << m_currentPosition << samples_read;
    return samples_read;
}
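Both getNextSamples() variants above hand the loop crossfade to SampleUtil::linearCrossfadeBuffers. A minimal sketch of such a stereo linear crossfade, patterned on the hand-rolled loop in the older variant shown in Example #30 below (the function name, signature, and CSAMPLE typedef here are assumptions, not the actual Mixxx code):

typedef float CSAMPLE; // assumed float sample type, as used throughout these examples

// Fades pFadeOut down while fading pFadeIn up, writing the mix into pDest.
// numSamples counts individual samples (frames * 2 channels).
static void linearCrossfadeSketch(CSAMPLE* pDest, const CSAMPLE* pFadeOut,
                                  const CSAMPLE* pFadeIn, int numSamples) {
    const double step = 2.0 / static_cast<double>(numSamples); // advance once per stereo frame
    double mix = 0.0;
    for (int i = 0; i + 1 < numSamples; i += 2) {
        pDest[i] = static_cast<CSAMPLE>(pFadeOut[i] * (1.0 - mix) + pFadeIn[i] * mix);
        pDest[i + 1] = static_cast<CSAMPLE>(pFadeOut[i + 1] * (1.0 - mix) + pFadeIn[i + 1] * mix);
        mix += step;
    }
}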
Example #22
0
void WVuMeter::paintEvent(QPaintEvent * /*unused*/) {
    ScopedTimer t("WVuMeter::paintEvent");

    QStyleOption option;
    option.initFrom(this);
    QStylePainter p(this);
    p.drawPrimitive(QStyle::PE_Widget, option);

    if (!m_pPixmapBack.isNull() && !m_pPixmapBack->isNull()) {
        // Draw background. DrawMode takes care of whether to stretch or not.
        m_pPixmapBack->draw(rect(), &p);
    }

    if (!m_pPixmapVu.isNull() && !m_pPixmapVu->isNull()) {
        const double widgetWidth = width();
        const double widgetHeight = height();
        const double pixmapWidth = m_pPixmapVu->width();
        const double pixmapHeight = m_pPixmapVu->height();

        // Draw (part of) vu
        if (m_bHorizontal) {
            const double widgetPosition = math_clamp(widgetWidth * m_dParameter,
                                                     0.0, widgetWidth);
            QRectF targetRect(0, 0, widgetPosition, widgetHeight);

            const double pixmapPosition = math_clamp(pixmapWidth * m_dParameter,
                                                     0.0, pixmapWidth);
            QRectF sourceRect(0, 0, pixmapPosition, pixmapHeight);
            m_pPixmapVu->draw(targetRect, &p, sourceRect);

            if (m_iPeakHoldSize > 0 && m_dPeakParameter > 0.0) {
                const double widgetPeakPosition = math_clamp(
                        widgetWidth * m_dPeakParameter, 0.0, widgetWidth);
                const double widgetPeakHoldSize = widgetWidth *
                        static_cast<double>(m_iPeakHoldSize) / pixmapWidth;

                const double pixmapPeakPosition = math_clamp(
                        pixmapWidth * m_dPeakParameter, 0.0, pixmapWidth);
                const double pixmapPeakHoldSize = m_iPeakHoldSize;

                targetRect = QRectF(widgetPeakPosition - widgetPeakHoldSize, 0,
                                    widgetPeakHoldSize, widgetHeight);
                sourceRect = QRectF(pixmapPeakPosition - pixmapPeakHoldSize, 0,
                                    pixmapPeakHoldSize, pixmapHeight);
                m_pPixmapVu->draw(targetRect, &p, sourceRect);
            }
        } else {
            const double widgetPosition = math_clamp(widgetHeight * m_dParameter,
                                                     0.0, widgetHeight);
            QRectF targetRect(0, widgetHeight - widgetPosition,
                              widgetWidth, widgetPosition);

            const double pixmapPosition = math_clamp(pixmapHeight * m_dParameter,
                                                     0.0, pixmapHeight);
            QRectF sourceRect(0, pixmapHeight - pixmapPosition,
                              pixmapWidth, pixmapPosition);
            m_pPixmapVu->draw(targetRect, &p, sourceRect);

            if (m_iPeakHoldSize > 0 && m_dPeakParameter > 0.0) {
                const double widgetPeakPosition = math_clamp(
                        widgetHeight * m_dPeakParameter, 0.0, widgetHeight);
                const double widgetPeakHoldSize = widgetHeight *
                        static_cast<double>(m_iPeakHoldSize) / pixmapHeight;

                const double pixmapPeakPosition = math_clamp(
                        pixmapHeight * m_dPeakParameter, 0.0, pixmapHeight);
                const double pixmapPeakHoldSize = m_iPeakHoldSize;

                targetRect = QRectF(0, widgetHeight - widgetPeakPosition,
                                    widgetWidth, widgetPeakHoldSize);
                sourceRect = QRectF(0, pixmapHeight - pixmapPeakPosition,
                                    pixmapWidth, pixmapPeakHoldSize);
                m_pPixmapVu->draw(targetRect, &p, sourceRect);
            }
        }
    }
    m_dLastParameter = m_dParameter;
    m_dLastPeakParameter = m_dPeakParameter;
}
Example #23
0
void CachingReader::hintAndMaybeWake(const QVector<Hint>& hintList) {
    // If no file is loaded, skip.
    if (m_readerStatus != TRACK_LOADED) {
        return;
    }

    QVectorIterator<Hint> iterator(hintList);

    // To prevent every bit of code having to guess how many samples
    // forward it makes sense to keep in memory, the hinter can provide
    // either 0 for a forward hint or -1 for a backward hint. We should
    // be calculating an appropriate number of samples to go backward as
    // some function of the latency, but for now just leave this as a
    // constant. 2048 is a pretty good number of samples because 25ms
    // latency corresponds to 1102.5 mono samples and we need double
    // that for stereo samples.
    const int default_samples = 2048;

    QSet<int> chunksToFreshen;
    while (iterator.hasNext()) {
        // Copy, don't use reference.
        Hint hint = iterator.next();

        if (hint.length == 0) {
            hint.length = default_samples;
        } else if (hint.length == -1) {
            hint.sample -= default_samples;
            hint.length = default_samples;
            if (hint.sample < 0) {
                hint.length += hint.sample;
                hint.sample = 0;
            }
        }
        if (hint.length < 0) {
            qDebug() << "ERROR: Negative hint length. Ignoring.";
            continue;
        }
        int start_sample = math_clamp(hint.sample, 0,
                                      m_iTrackNumSamplesCallbackSafe);
        int start_chunk = chunkForSample(start_sample);
        int end_sample = math_clamp(hint.sample + hint.length - 1, 0,
                                    m_iTrackNumSamplesCallbackSafe);
        int end_chunk = chunkForSample(end_sample);

        for (int current = start_chunk; current <= end_chunk; ++current) {
            chunksToFreshen.insert(current);
        }
    }

    // For every chunk that the hints indicated, check if it is in the cache. If
    // any are not, then wake.
    bool shouldWake = false;
    QSetIterator<int> setIterator(chunksToFreshen);
    while (setIterator.hasNext()) {
        int chunk = setIterator.next();

        // This will cause the chunk to be 'freshened' in the cache. The
        // chunk will be moved to the end of the LRU list.
        if (!m_chunksBeingRead.contains(chunk) && lookupChunk(chunk) == NULL) {
            shouldWake = true;
            Chunk* pChunk = allocateChunkExpireLRU();
            if (pChunk == NULL) {
                qDebug() << "ERROR: Couldn't allocate spare Chunk to make ChunkReadRequest.";
                continue;
            }
            m_chunksBeingRead.insert(chunk, pChunk);
            ChunkReadRequest request;
            pChunk->chunk_number = chunk;
            request.chunk = pChunk;
            // qDebug() << "Requesting read of chunk" << chunk << "into" << pChunk;
            // qDebug() << "Requesting read into " << request.chunk->data;
            if (m_chunkReadRequestFIFO.write(&request, 1) != 1) {
                qDebug() << "ERROR: Could not submit read request for "
                         << chunk;
            }
            //qDebug() << "Checking chunk " << chunk << " shouldWake:" << shouldWake << " chunksToRead" << m_chunksToRead.size();
        }
    }

    // If there are chunks to be read, wake up.
    if (shouldWake) {
        m_pWorker->workReady();
    }
}
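The backward-hint branch above is easiest to follow with concrete numbers. A small worked sketch of the same translation and clamping, using hypothetical constants chosen only for illustration (the chunk size, track length, and math_clamp helper are assumptions, not quoted from Mixxx):

// Hypothetical chunk size for illustration; the real CachingReader value differs.
static const int kSamplesPerChunkSketch = 8192;

static int chunkForSampleSketch(int sample) {
    return sample / kSamplesPerChunkSketch; // assumes fixed-size, zero-based chunks
}

static void backwardHintSketch() {
    const int kTrackNumSamples = 1000000; // made-up track length
    const int default_samples = 2048;

    int sample = 1000; // hint.sample
    int length = -1;   // -1 marks a backward hint
    if (length == -1) {
        sample -= default_samples; // 1000 - 2048 = -1048
        length = default_samples;
        if (sample < 0) {
            length += sample; // 2048 + (-1048) = 1000 samples remain
            sample = 0;
        }
    }
    // Both ends clamp to the track and land in chunk 0 with these numbers.
    int start_chunk = chunkForSampleSketch(math_clamp(sample, 0, kTrackNumSamples));
    int end_chunk = chunkForSampleSketch(math_clamp(sample + length - 1, 0, kTrackNumSamples));
    (void)start_chunk;
    (void)end_chunk;
}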
Example #24
0
void EnginePregain::process(CSAMPLE* pInOut, const int iBufferSize) {
    const float fReplayGain = m_pCOReplayGain->get();
    float fReplayGainCorrection;
    if (!s_pEnableReplayGain->toBool() || m_pPassthroughEnabled->toBool()) {
        // Override replaygain value if passing through
        // TODO(XXX): consider a good default.
        // Do we expect a replaygain-leveled input or
        // an input normalized to 1?
        fReplayGainCorrection = 1; // We expect a replaygain-leveled input
    } else if (fReplayGain == 0) {
        // use predicted replaygain
        fReplayGainCorrection = (float)s_pDefaultBoost->get();
        // We prepare for smoothfading to ReplayGain suggested gain
        // if ReplayGain value changes or ReplayGain is enabled
        m_bSmoothFade = true;
        m_timer.restart();
    } else {
        // This is the point where the ReplayGain analyser takes effect: the
        // suggested gain changes from 0 to a nonzero value, and we want to
        // fade to it smoothly. The catch is that this code cannot block the
        // audio callback for a whole second.
        // So we need to alter gain each time ::process is called.

        const float fullReplayGainBoost = fReplayGain *
                (float)s_pReplayGainBoost->get();

        // This means that a ReplayGain value has been calculated after the
        // track has been loaded
        const double kFadeSeconds = 1.0;

        if (m_bSmoothFade) {
            double seconds = static_cast<double>(m_timer.elapsed()) / 1e9;
            if (seconds < kFadeSeconds) {
                // Fade smoothly
                double fadeFrac = seconds / kFadeSeconds;
                fReplayGainCorrection = m_fPrevGain * (1.0 - fadeFrac) +
                        fadeFrac * fullReplayGainBoost;
            } else {
                m_bSmoothFade = false;
                fReplayGainCorrection = fullReplayGainBoost;
            }
        } else {
            // Passing a user defined boost
            fReplayGainCorrection = fullReplayGainBoost;
        }
    }

    // Clamp gain to within [0, 10.0] to prevent insane gains. This can happen
    // (some corrupt files get really high replay gain values).
    // 10 allows a maximum replay Gain Boost * calculated replay gain of ~2
    float totalGain = (float)m_pPotmeterPregain->get() *
            math_clamp(fReplayGainCorrection, 0.0f, 10.0f);

    m_pTotalGain->set(totalGain);

    // Vinylsoundemu:
    // As the speed approaches zero, hearing small bursts of sound at full volume
    // is distracting and doesn't mimic the way that vinyl sounds when played slowly.
    // Instead, reduce gain to provide a soft rolloff.
    const float kThresholdSpeed = 0.070; // Scale volume if playback speed is below 7%.
    if (fabs(m_dSpeed) < kThresholdSpeed) {
        totalGain *= fabs(m_dSpeed) / kThresholdSpeed;
    }

    if ((m_dSpeed * m_dOldSpeed < 0) && m_scratching) {
        // direction changed, go through zero if scratching
        SampleUtil::applyRampingGain(&pInOut[0], m_fPrevGain, 0, iBufferSize / 2);
        SampleUtil::applyRampingGain(&pInOut[iBufferSize / 2], 0, totalGain, iBufferSize / 2);
    } else if (totalGain != m_fPrevGain) {
        // Prevent sound wave discontinuities by interpolating from old to new gain.
        SampleUtil::applyRampingGain(pInOut, m_fPrevGain, totalGain, iBufferSize);
        m_fPrevGain = totalGain;
    } else {
        // SampleUtil deals with aliased buffers and gains of 1 or 0.
        SampleUtil::applyGain(pInOut, totalGain, iBufferSize);
    }
}
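The smooth fade above is a plain linear interpolation from the previous gain to the ReplayGain target over one second, and the vinyl emulation below it scales the gain linearly once |speed| drops under the 7% threshold. A condensed sketch of both calculations (the function name and standalone form are illustrative only):

#include <cmath>

static float pregainFadeSketch(float prevGain, float targetGain,
                               double secondsSinceFadeStart, double speed) {
    const double kFadeSeconds = 1.0;
    double fadeFrac = secondsSinceFadeStart / kFadeSeconds;
    float gain = (fadeFrac < 1.0)
            ? static_cast<float>(prevGain * (1.0 - fadeFrac) + fadeFrac * targetGain)
            : targetGain;

    const double kThresholdSpeed = 0.070; // below 7% playback speed, roll the gain off
    if (std::fabs(speed) < kThresholdSpeed) {
        gain *= static_cast<float>(std::fabs(speed) / kThresholdSpeed);
    }
    return gain;
}
// pregainFadeSketch(1.0f, 2.0f, 0.5, 1.0)   -> 1.5f  (halfway through the fade)
// pregainFadeSketch(1.0f, 1.0f, 2.0, 0.035) -> 0.5f  (half of the 7% threshold)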
Example #25
0
DECLARE_TEST( math, utility )
{
	int i;
	
	EXPECT_REALONE( math_abs( REAL_ONE ) );
	EXPECT_REALONE( math_abs( -REAL_ONE ) );
	EXPECT_REALZERO( math_abs( REAL_ZERO ) );
	EXPECT_REALEQ( math_abs( REAL_MAX ), REAL_MAX );
	EXPECT_REALEQ( math_abs( -REAL_MAX ), REAL_MAX );
	EXPECT_REALEQ( math_abs( REAL_MIN ), REAL_MIN );
	EXPECT_REALEQ( math_abs( -REAL_MIN ), REAL_MIN );

	EXPECT_REALZERO( math_mod( REAL_ZERO, REAL_ONE ) );
	EXPECT_REALZERO( math_mod( REAL_ONE, REAL_ONE ) );
	EXPECT_REALZERO( math_mod( REAL_MAX, REAL_ONE ) );
	EXPECT_REALONE( math_mod( REAL_THREE, REAL_TWO ) );
	EXPECT_REALONE( -math_mod( -REAL_THREE, -REAL_TWO ) );

	EXPECT_EQ( math_floor( REAL_ZERO ), 0 );
	EXPECT_EQ( math_floor( REAL_C( 0.999 ) ), 0 );
	EXPECT_EQ( math_floor( REAL_C( -0.1 ) ), -1 );
	EXPECT_EQ( math_floor( REAL_C( 42.5 ) ), 42 );

	EXPECT_EQ( math_ceil( REAL_ZERO ), 0 );
	EXPECT_EQ( math_ceil( REAL_C( 0.999 ) ), 1 );
	EXPECT_EQ( math_ceil( REAL_C( -0.1 ) ), 0 );
	EXPECT_EQ( math_ceil( REAL_C( 42.5 ) ), 43 );
	EXPECT_EQ( math_ceil( REAL_C( 42.45 ) ), 43 );

	EXPECT_EQ( math_floor64( REAL_ZERO ), 0 );
	EXPECT_EQ( math_floor64( REAL_C( 0.999 ) ), 0 );
	EXPECT_EQ( math_floor64( REAL_C( -0.1 ) ), -1 );
	EXPECT_EQ( math_floor64( REAL_C( 42.5 ) ), 42 );

	EXPECT_EQ( math_ceil64( REAL_ZERO ), 0 );
	EXPECT_EQ( math_ceil64( REAL_C( 0.999 ) ), 1 );
	EXPECT_EQ( math_ceil64( REAL_C( -0.1 ) ), 0 );
	EXPECT_EQ( math_ceil64( REAL_C( 42.5 ) ), 43 );
	EXPECT_EQ( math_ceil64( REAL_C( 42.45 ) ), 43 );
	
	EXPECT_EQ( math_round( REAL_ZERO ), 0 );
	EXPECT_EQ( math_round( REAL_C( 0.999 ) ), 1 );
	EXPECT_EQ( math_round( REAL_C( -0.1 ) ), 0 );
	EXPECT_EQ( math_round( REAL_C( 42.5 ) ), 43 );
	EXPECT_EQ( math_round( REAL_C( 42.45 ) ), 42 );
	
	EXPECT_EQ( math_trunc( REAL_ZERO ), 0 );
	EXPECT_EQ( math_trunc( REAL_C( 0.999 ) ), 0 );
	EXPECT_EQ( math_trunc( REAL_C( -0.1 ) ), 0 );
	EXPECT_EQ( math_trunc( REAL_C( 42.5 ) ), 42 );

	EXPECT_EQ( math_align_poweroftwo( 2 ), 2 );
	EXPECT_EQ( math_align_poweroftwo( 3 ), 4 );
	EXPECT_EQ( math_align_poweroftwo( 4 ), 4 );
	EXPECT_EQ( math_align_poweroftwo( 33 ), 64 );
	EXPECT_EQ( math_align_poweroftwo( 134217729 ), 268435456 );

	for( i = 1; i < 31; ++i )
	{
		EXPECT_TRUE( math_is_poweroftwo( math_align_poweroftwo( ( 2 << i ) - 1 ) ) );
		EXPECT_TRUE( math_is_poweroftwo( math_align_poweroftwo( ( 2 << i )     ) ) );
		EXPECT_TRUE( math_is_poweroftwo( math_align_poweroftwo( ( 2 << i ) + 1 ) ) );

		EXPECT_FALSE( math_is_poweroftwo( ( 2 << i ) - 1 ) );
		EXPECT_TRUE( math_is_poweroftwo( ( 2 << i )     ) );
		EXPECT_FALSE( math_is_poweroftwo( ( 2 << i ) + 1 ) );
	}

	EXPECT_EQ( math_align_up( 1, 1 ), 1 );
	EXPECT_EQ( math_align_up( 1, 2 ), 2 );
	EXPECT_EQ( math_align_up( 17, 2 ), 18 );
	EXPECT_EQ( math_align_up( 43, 42 ), 84 );

	EXPECT_REALZERO( math_smoothstep( REAL_ZERO ) );
	EXPECT_REALONE( math_smoothstep( REAL_ONE ) );
	EXPECT_REALEQ( math_smoothstep( REAL_HALF ), REAL_HALF );
	
	EXPECT_REALZERO( math_smootherstep( REAL_ZERO ) );
	EXPECT_REALONE( math_smootherstep( REAL_ONE ) );
	EXPECT_REALEQ( math_smootherstep( REAL_HALF ), REAL_HALF );

	EXPECT_REALZERO( math_lerp( REAL_ZERO, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALZERO( math_lerp( REAL_ONE, REAL_ONE, REAL_ZERO ) );
	EXPECT_REALONE( math_lerp( REAL_ONE, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALONE( math_lerp( REAL_ZERO, REAL_ONE, REAL_ZERO ) );
	EXPECT_REALEQ( math_lerp( REAL_HALF, REAL_ZERO, REAL_ONE ), REAL_HALF );
	EXPECT_REALEQ( math_lerp( REAL_HALF, REAL_ONE, REAL_ZERO ), REAL_HALF );

	EXPECT_REALZERO( math_unlerp( REAL_ZERO, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALZERO( math_unlerp( REAL_ONE, REAL_ONE, REAL_ZERO ) );
	EXPECT_REALONE( math_unlerp( REAL_ONE, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALONE( math_unlerp( REAL_ZERO, REAL_ONE, REAL_ZERO ) );
	EXPECT_REALEQ( math_unlerp( REAL_HALF, REAL_ZERO, REAL_ONE ), REAL_HALF );
	EXPECT_REALEQ( math_unlerp( REAL_HALF, REAL_ONE, REAL_ZERO ), REAL_HALF );

	EXPECT_REALONE( math_linear_remap( REAL_C( 150.0 ), REAL_C( 100.0 ), REAL_C( 200.0 ), REAL_ZERO, REAL_TWO ) );

	EXPECT_REALZERO( math_clamp( -REAL_ONE, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALZERO( math_clamp( REAL_ZERO, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALEQ( math_clamp( REAL_HALF, REAL_ZERO, REAL_ONE ), REAL_HALF );
	EXPECT_REALONE( math_clamp( REAL_ONE, REAL_ZERO, REAL_ONE ) );
	EXPECT_REALONE( math_clamp( REAL_TWO, REAL_ZERO, REAL_ONE ) );

	return 0;
}
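The power-of-two and alignment assertions above pin those helpers down fairly tightly. A sketch of implementations that satisfies exactly those expectations (the real foundation_lib versions may differ in argument types and overflow handling):

// Illustrative only; consistent with the EXPECT_* lines above.
static int is_poweroftwo_sketch(unsigned int x) {
    return x != 0 && (x & (x - 1)) == 0; // 4 -> true, 33 -> false
}

static unsigned int align_poweroftwo_sketch(unsigned int x) {
    // Round up to the next power of two: 3 -> 4, 33 -> 64, 134217729 -> 268435456.
    unsigned int p = 1;
    while (p < x)
        p <<= 1;
    return p;
}

static unsigned int align_up_sketch(unsigned int x, unsigned int alignment) {
    // Round up to the next multiple of alignment: align_up(17, 2) == 18, align_up(43, 42) == 84.
    return ((x + alignment - 1) / alignment) * alignment;
}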
Example #26
0
DECLARE_TEST( event, delay_threaded )
{
	object_t thread[32];
	producer_thread_arg_t args[32] = {0};
	event_stream_t* stream;
	event_block_t* block;
	event_t* event;
	tick_t endtime, curtime, payloadtime, begintime, prevtime;
	unsigned int read[32];
	int i;
	bool running = true;
	int num_threads = math_clamp( system_hardware_threads() * 4, 4, 32 );

	stream = event_stream_allocate( 0 );
	begintime = time_current();

	for( i = 0; i < num_threads; ++i )
	{
		args[i].stream = stream;
		args[i].end_time = time_current() + ( time_ticks_per_second() * 5 );
		args[i].max_delay = time_ticks_per_second() * 5;
		args[i].sleep_time = 50;
		args[i].id = i;

		read[i] = 0;
		thread[i] = thread_create( producer_thread, "event_producer", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[i], &args[i] );
	}

	test_wait_for_threads_startup( thread, num_threads );

	while( running )
	{
		running = false;

		for( i = 0; i < num_threads; ++i )
		{
			if( thread_is_running( thread[i] ) )
			{
				running = true;
				break;
			}
		}

		thread_yield();

		prevtime = begintime;
		begintime = time_current();
		block = event_stream_process( stream );
		event = event_next( block, 0 );
		curtime = time_current();

		while( event )
		{
			running = true;
			++read[ event->object ];
			memcpy( &payloadtime, event->payload, sizeof( tick_t ) );

			EXPECT_GE( event_payload_size( event ), 8 );
			EXPECT_LE( event_payload_size( event ), 256 );
			EXPECT_GE( payloadtime, prevtime );
			EXPECT_GE( curtime, payloadtime );

			event = event_next( block, event );
			curtime = time_current();
		}
	}

	endtime = time_current() + ( time_ticks_per_second() * 6 );
	do
	{
		prevtime = begintime;
		begintime = time_current();
		block = event_stream_process( stream );
		event = event_next( block, 0 );
		curtime = time_current();
		while( event )
		{
			++read[ event->object ];
			memcpy( &payloadtime, event->payload, sizeof( tick_t ) );

			EXPECT_GE( event_payload_size( event ), 8 );
			EXPECT_LE( event_payload_size( event ), 256 );
			EXPECT_GE( payloadtime, prevtime );
			EXPECT_GE( curtime, payloadtime );

			event = event_next( block, event );
			curtime = time_current();
		}

		thread_sleep( 10 );

	} while( time_current() < endtime );

	for( i = 0; i < num_threads; ++i )
	{
		unsigned int should_have_read = (unsigned int)((uintptr_t)thread_result( thread[i] ));
		EXPECT_EQ( read[i], should_have_read );
		thread_terminate( thread[i] );
		thread_destroy( thread[i] );
	}

	test_wait_for_threads_exit( thread, num_threads );

	event_stream_deallocate( stream );
	
	return 0;
}
Example #27
0
void AutoPanEffect::processChannel(
          const ChannelHandle& handle, AutoPanGroupState* pGroupState,
          const CSAMPLE* pInput, CSAMPLE* pOutput,
          const mixxx::EngineParameters& bufferParameters,
          const EffectEnableState enableState,
          const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    if (enableState == EffectEnableState::Disabled) {
        return;
    }

    AutoPanGroupState& gs = *pGroupState;
    double width = m_pWidthParameter->value();
    double period = m_pPeriodParameter->value();
    double smoothing = 0.5 - m_pSmoothingParameter->value();

    if (groupFeatures.has_beat_length_sec) {
        // period is a number of beats
        double beats = std::max(roundToFraction(period, 2), 0.25);
        period = beats * groupFeatures.beat_length_sec * bufferParameters.sampleRate();

        // TODO(xxx) sync phase
        //if (groupFeatures.has_beat_fraction) {

    } else {
        // period is a number of seconds
        period = std::max(period, 0.25) * bufferParameters.sampleRate();
    }

    // When the period changes, the apparent position of the sound should not
    // jump, so the elapsed time is rescaled to the new period.
    if (gs.m_dPreviousPeriod != -1.0) {
        gs.time *= period / gs.m_dPreviousPeriod;
    }

    gs.m_dPreviousPeriod = period;

    if (gs.time >= period || enableState == EffectEnableState::Enabling) {
        gs.time = 0;
    }

    // Normally, the position goes from 0 to 1 linearly. Here we make steps at
    // 0.25 and 0.75 to have the sound fully on the right or fully on the left.
    // At the end, the "position" value can describe a sinusoid or a square
    // curve depending on the size of those steps.

    // coef of the slope
    // a = (y2 - y1) / (x2 - x1)
    //       1  / ( 1 - 2 * stepfrac)
    float a = smoothing != 0.5f ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;

    // size of a segment of slope (controlled by the "smoothing" parameter)
    float u = (0.5f - smoothing) / 2.0f;

    gs.frac.setRampingThreshold(kPositionRampingThreshold);

    double sinusoid = 0;

    // NOTE: Assuming engine is working in stereo.
    for (unsigned int i = 0; i + 1 < bufferParameters.samplesPerBuffer(); i += 2) {

        CSAMPLE periodFraction = CSAMPLE(gs.time) / period;

        // current quarter in the trigonometric circle
        float quarter = floorf(periodFraction * 4.0f);

        // part of the period fraction being a step (not in the slope)
        CSAMPLE stepsFractionPart = floorf((quarter + 1.0f) / 2.0f) * smoothing;

        // float inInterval = fmod( periodFraction, (period / 2.0) );
        float inStepInterval = fmod(periodFraction, 0.5f);

        CSAMPLE angleFraction;
        if (inStepInterval > u && inStepInterval < (u + smoothing)) {
            // at full left or full right
            angleFraction = quarter < 2.0f ? 0.25f : 0.75f;
        } else {
            // in the slope (linear function)
            angleFraction = (periodFraction - stepsFractionPart) * a;
        }

        // transforms the angleFraction into a sinusoid.
        // The width parameter modulates the two limits. If width is 0.5,
        // the limits are 0.25 and 0.75. If it is 0, both limits are 0.5,
        // so the sound stays centered. If it is 1, the limits are 0 and 1
        // (full left and full right).
        sinusoid = sin(M_PI * 2.0f * angleFraction) * width;
        gs.frac.setWithRampingApplied((sinusoid + 1.0f) / 2.0f);

        // apply the delay
        gs.delay->process(&pInput[i], &pOutput[i],
                -0.005 * math_clamp(((gs.frac * 2.0) - 1.0f), -1.0, 1.0) * bufferParameters.sampleRate());

        double lawCoef = computeLawCoefficient(sinusoid);
        pOutput[i] *= gs.frac * lawCoef;
        pOutput[i+1] *= (1.0f - gs.frac) * lawCoef;

        gs.time++;
        while (gs.time >= period) {
            // Click for debug
            //pOutput[i] = 1.0f;
            //pOutput[i+1] = 1.0f;

            // The while loop is required in case period changes the value
            gs.time -= period;
        }
    }
}
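The delay call near the end of the loop above offsets one channel by at most ±5 ms: gs.frac stays in [0, 1], so (frac * 2 - 1) lies in [-1, 1], math_clamp keeps it there, and the -0.005 factor turns that into seconds of delay before conversion to samples. A quick check of the extremes at a hypothetical 44100 Hz rate (the helper name and standalone form are illustrative; math_clamp is assumed as sketched earlier):

// Delay-offset arithmetic from the processChannel() loop above.
static double autoPanDelaySamplesSketch(double frac, double sampleRate) {
    return -0.005 * math_clamp(frac * 2.0 - 1.0, -1.0, 1.0) * sampleRate;
}
// autoPanDelaySamplesSketch(0.0, 44100.0) -> +220.5 samples (about +5 ms)
// autoPanDelaySamplesSketch(0.5, 44100.0) ->    0.0 samples (centered)
// autoPanDelaySamplesSketch(1.0, 44100.0) -> -220.5 samples (about -5 ms)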
Example #28
0
void EnginePregain::process(CSAMPLE* pInOut, const int iBufferSize) {
    const float fReplayGain = m_pCOReplayGain->get();
    float fReplayGainCorrection;
    if (!s_pEnableReplayGain->toBool() || m_pPassthroughEnabled->toBool()) {
        // Override replaygain value if passing through
        // TODO(XXX): consider a good default.
        // Do we expect a replaygain-leveled input or
        // an input normalized to 1?
        fReplayGainCorrection = 1; // We expect a replaygain-leveled input
    } else if (fReplayGain == 0) {
        // use predicted replaygain
        fReplayGainCorrection = (float)s_pDefaultBoost->get();
        // We prepare for smoothfading to ReplayGain suggested gain
        // if ReplayGain value changes or ReplayGain is enabled
        m_bSmoothFade = true;
        m_timer.restart();
    } else {
        // This is the point where the ReplayGain analyzer takes effect: the
        // suggested gain changes from 0 to a nonzero value, and we want to
        // fade to it smoothly. The catch is that this code cannot block the
        // audio callback for a whole second.
        // So we need to alter gain each time ::process is called.

        const float fullReplayGainBoost = fReplayGain *
                (float)s_pReplayGainBoost->get();

        // This means that a ReplayGain value has been calculated after the
        // track has been loaded
        const double kFadeSeconds = 1.0;

        if (m_bSmoothFade) {
            double seconds = m_timer.elapsed().toDoubleSeconds();
            if (seconds < kFadeSeconds) {
                // Fade smoothly
                double fadeFrac = seconds / kFadeSeconds;
                fReplayGainCorrection = m_fPrevGain * (1.0 - fadeFrac) +
                        fadeFrac * fullReplayGainBoost;
            } else {
                m_bSmoothFade = false;
                fReplayGainCorrection = fullReplayGainBoost;
            }
        } else {
            // Passing a user defined boost
            fReplayGainCorrection = fullReplayGainBoost;
        }
    }

    // Clamp gain to within [0, 10.0] to prevent insane gains. This can happen
    // (some corrupt files get really high replay gain values).
    // 10 allows a maximum replay Gain Boost * calculated replay gain of ~2
    CSAMPLE_GAIN totalGain = (CSAMPLE_GAIN)m_pPotmeterPregain->get() *
            math_clamp(fReplayGainCorrection, 0.0f, 10.0f);

    m_pTotalGain->set(totalGain);

    // Vinylsoundemu:
    // Apply Gain change depending on the speed.
    // We have -Inf dB at x0, -6 dB at x0.3 and 0 dB at x1.
    // For faster speeds it is hard to measure it, but it looks like we had an
    // increasing gain there which would lead to undesired clipping.
    // This is ignored here, to avoid that.
    // It also turns out that it is physically not possible to scratch faster
    // than x5 without the needle losing contact, which is our threshold for
    // the effect. So we apply a curve here that emulates the natural gain
    // change up to x2.5 (about +3.5 dB) and then limits the gain towards
    // <= +5.5 dB at the maximum of x5.
    totalGain *= log10((math_min(fabs(m_dSpeed), 5.0) * 4) + 1) / log10((1 * 4) + 1);

    if ((m_dSpeed * m_dOldSpeed < 0) && m_scratching) {
        // direction changed, go through zero if scratching
        SampleUtil::applyRampingGain(&pInOut[0], m_fPrevGain, 0, iBufferSize / 2);
        SampleUtil::applyRampingGain(&pInOut[iBufferSize / 2], 0, totalGain, iBufferSize / 2);
    } else if (totalGain != m_fPrevGain) {
        // Prevent sound wave discontinuities by interpolating from old to new gain.
        SampleUtil::applyRampingGain(pInOut, m_fPrevGain, totalGain, iBufferSize);
    } else {
        // SampleUtil deals with aliased buffers and gains of 1 or 0.
        SampleUtil::applyGain(pInOut, totalGain, iBufferSize);
    }
    m_fPrevGain = totalGain;
}
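The speed curve above can be sanity-checked against its own comment: log10(min(|speed|, 5) * 4 + 1) / log10(5) evaluates to roughly 0.49 (about -6 dB) at x0.3, exactly 1 (0 dB) at x1, about 1.49 (+3.5 dB) at x2.5, and about 1.89 (+5.5 dB) at the x5 cap. A standalone sketch of the same multiplier (the function name is illustrative):

#include <algorithm>
#include <cmath>

// Speed-dependent gain multiplier applied to totalGain in the block above.
static double vinylSpeedGainSketch(double speed) {
    return std::log10((std::min(std::fabs(speed), 5.0) * 4.0) + 1.0) / std::log10(5.0);
}
// vinylSpeedGainSketch(0.3) ~= 0.49  (about -6 dB)
// vinylSpeedGainSketch(1.0) == 1.0   (0 dB)
// vinylSpeedGainSketch(5.0) ~= 1.89  (about +5.5 dB)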
Example #29
0
double BpmControl::calcSyncAdjustment(double my_percentage, bool userTweakingSync) {
    if (m_resetSyncAdjustment) {
        m_resetSyncAdjustment = false;
        m_dLastSyncAdjustment = 1.0;
    }

    // The shortest distance is either directly to the master or backwards.

    // TODO(rryan): This is kind of backwards because we are measuring distance
    // from master to my percentage. All of the control code below is based on
    // this point of reference so I left it this way but I think we should think
    // about things in terms of "my percentage-offset setpoint" that the control
    // loop should aim to maintain.
    // TODO(rryan): All of this code is based on the assumption that a track
    // can't pass through multiple beats in one engine callback. Instead our
    // setpoint should be tracking the true offset in "samples traveled" rather
    // than modular 1.0 beat fractions. This will allow sync to work across loop
    // boundaries too.

    double master_percentage = m_dSyncTargetBeatDistance;
    double shortest_distance = shortestPercentageChange(
        master_percentage, my_percentage);

    /*qDebug() << m_sGroup << m_dUserOffset;
    qDebug() << "master beat distance:" << master_percentage;
    qDebug() << "my     beat distance:" << my_percentage;
    qDebug() << "error               :" << (shortest_distance - m_dUserOffset);
    qDebug() << "user offset         :" << m_dUserOffset;*/

    double adjustment = 1.0;

    if (userTweakingSync) {
        // Don't do anything else, leave it
        adjustment = 1.0;
        m_dUserOffset = shortest_distance;
    } else {
        double error = shortest_distance - m_dUserOffset;
        // Threshold above which we do sync adjustment.
        const double kErrorThreshold = 0.01;
        // Threshold above which sync is really, really bad, so much so that we
        // don't even know if we're ahead or behind.  This can occur when quantize was
        // off, but then it gets turned on.
        const double kTrainWreckThreshold = 0.2;
        const double kSyncAdjustmentCap = 0.05;
        if (fabs(error) > kTrainWreckThreshold) {
            // Assume poor reflexes (late button push) -- speed up to catch the other track.
            adjustment = 1.0 + kSyncAdjustmentCap;
        } else if (fabs(error) > kErrorThreshold) {
            // Proportional control constant. The higher this is, the more we
            // influence sync.
            const double kSyncAdjustmentProportional = 0.7;
            const double kSyncDeltaCap = 0.02;

            // TODO(owilliams): There are a lot of "1.0"s in this code -- can we eliminate them?
            const double adjust = 1.0 + (-error * kSyncAdjustmentProportional);
            // Cap the difference between the last adjustment and this one.
            double delta = adjust - m_dLastSyncAdjustment;
            delta = math_clamp(delta, -kSyncDeltaCap, kSyncDeltaCap);

            // Cap the adjustment between -kSyncAdjustmentCap and +kSyncAdjustmentCap
            adjustment = 1.0 + math_clamp(
                    m_dLastSyncAdjustment - 1.0 + delta,
                    -kSyncAdjustmentCap, kSyncAdjustmentCap);
        } else {
            // We are in sync, no adjustment needed.
            adjustment = 1.0;
        }
    }
    m_dLastSyncAdjustment = adjustment;
    return adjustment;
}
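The two math_clamp calls above form a capped proportional controller: the per-callback change is limited to ±kSyncDeltaCap (0.02) and the total adjustment to ±kSyncAdjustmentCap (0.05) around 1.0. One step with illustrative numbers (the function name and standalone form are assumptions; math_clamp is assumed as sketched earlier):

// One controller step, mirroring the kErrorThreshold branch above.
static double syncAdjustmentStepSketch(double error, double lastAdjustment) {
    const double kSyncAdjustmentProportional = 0.7;
    const double kSyncDeltaCap = 0.02;
    const double kSyncAdjustmentCap = 0.05;

    double adjust = 1.0 + (-error * kSyncAdjustmentProportional);
    double delta = math_clamp(adjust - lastAdjustment, -kSyncDeltaCap, kSyncDeltaCap);
    return 1.0 + math_clamp(lastAdjustment - 1.0 + delta,
                            -kSyncAdjustmentCap, kSyncAdjustmentCap);
}
// syncAdjustmentStepSketch(-0.1, 1.00) -> 1.02 (delta capped at +0.02)
// syncAdjustmentStepSketch(-0.1, 1.04) -> 1.05 (total adjustment capped at 1.0 + 0.05)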
Example #30
0
int ReadAheadManager::getNextSamples(double dRate, CSAMPLE* buffer,
                                     int requested_samples) {
    if (!even(requested_samples)) {
        qDebug() << "ERROR: Non-even requested_samples to ReadAheadManager::getNextSamples";
        requested_samples--;
    }
    bool in_reverse = dRate < 0;
    int start_sample = m_iCurrentPosition;
    //qDebug() << "start" << start_sample << requested_samples;
    int samples_needed = requested_samples;
    CSAMPLE* base_buffer = buffer;

    // A loop will only limit the amount we can read in one shot.

    const double loop_trigger = m_sEngineControls[0]->nextTrigger(
        dRate, m_iCurrentPosition, 0, 0);
    bool loop_active = loop_trigger != kNoTrigger;
    int preloop_samples = 0;

    if (loop_active) {
        int samples_available = in_reverse ?
                m_iCurrentPosition - loop_trigger :
                loop_trigger - m_iCurrentPosition;
        if (samples_available < 0) {
            // Already past the trigger; don't pass an inverted range to math_clamp.
            samples_needed = 0;
        } else {
            preloop_samples = samples_available;
            samples_needed = math_clamp(samples_needed, 0, samples_available);
        }
    }

    if (in_reverse) {
        start_sample = m_iCurrentPosition - samples_needed;
    }

    // Sanity checks.
    if (samples_needed < 0) {
        qDebug() << "Need negative samples in ReadAheadManager::getNextSamples. Ignoring read";
        return 0;
    }

    int samples_read = m_pReader->read(start_sample, samples_needed,
                                       base_buffer);

    if (samples_read != samples_needed) {
        qDebug() << "didn't get what we wanted" << samples_read << samples_needed;
    }

    // Increment or decrement current read-ahead position
    if (in_reverse) {
        addReadLogEntry(m_iCurrentPosition, m_iCurrentPosition - samples_read);
        m_iCurrentPosition -= samples_read;
    } else {
        addReadLogEntry(m_iCurrentPosition, m_iCurrentPosition + samples_read);
        m_iCurrentPosition += samples_read;
    }

    // Activate on this trigger if necessary
    if (loop_active) {
        // LoopingControl makes the decision about whether we should loop or
        // not.
        const double loop_target = m_sEngineControls[0]->
                process(dRate, m_iCurrentPosition, 0, 0);

        if (loop_target != kNoTrigger) {
            m_iCurrentPosition = loop_target;

            int loop_read_position = m_iCurrentPosition +
                    (in_reverse ? preloop_samples : -preloop_samples);

            int looping_samples_read = m_pReader->read(
                loop_read_position, samples_read, m_pCrossFadeBuffer);

            if (looping_samples_read != samples_read) {
                qDebug() << "ERROR: Couldn't get all needed samples for crossfade.";
            }

            // do crossfade from the current buffer into the new loop beginning
            if (samples_read != 0) { // avoid division by zero
                double mix_amount = 0.0;
                double mix_inc = 2.0 / static_cast<double>(samples_read);
                for (int i = 0; i < samples_read; i += 2) {
                    base_buffer[i] = base_buffer[i] * (1.0 - mix_amount) + m_pCrossFadeBuffer[i] * mix_amount;
                    base_buffer[i+1] = base_buffer[i+1] * (1.0 - mix_amount) + m_pCrossFadeBuffer[i+1] * mix_amount;
                    mix_amount += mix_inc;
                }
            }
        }
    }

    // Reverse the samples in-place
    if (in_reverse) {
        // TODO(rryan) pull this into MixxxUtil or something
        CSAMPLE temp1, temp2;
        for (int j = 0; j < samples_read/2; j += 2) {
            const int endpos = samples_read-1-j-1;
            temp1 = base_buffer[j];
            temp2 = base_buffer[j+1];
            base_buffer[j] = base_buffer[endpos];
            base_buffer[j+1] = base_buffer[endpos+1];
            base_buffer[endpos] = temp1;
            base_buffer[endpos+1] = temp2;
        }
    }

    //qDebug() << "read" << m_iCurrentPosition << samples_read;
    return samples_read;
}