Example #1
void PhaserEffect::processChannel(const ChannelHandle& handle,
                                  PhaserGroupState* pState,
                                  const CSAMPLE* pInput, CSAMPLE* pOutput,
                                  const mixxx::EngineParameters& bufferParameters,
                                  const EffectEnableState enableState,
                                  const GroupFeatureState& groupFeatures,
                                  const EffectChainMixMode mixMode) {
    Q_UNUSED(handle);
    Q_UNUSED(mixMode);

    if (enableState == EffectEnableState::Enabling) {
        pState->clear();
    }

    CSAMPLE depth = 0;
    if (enableState != EffectEnableState::Disabling) {
        depth = m_pDepthParameter->value();
    }

    double periodParameter = m_pLFOPeriodParameter->value();
    double periodSamples;
    if (groupFeatures.has_beat_length_sec) {
        // periodParameter is a number of beats
        periodParameter = std::max(roundToFraction(periodParameter, 2.0), 1/4.0);
        if (m_pTripletParameter->toBool()) {
            periodParameter /= 3.0;
        }
        periodSamples = periodParameter * groupFeatures.beat_length_sec * bufferParameters.sampleRate();
    } else {
        // periodParameter is a number of seconds
        periodSamples = std::max(periodParameter, 1/4.0) * bufferParameters.sampleRate();
    }
    // freqSkip is used to calculate the phase independently for each channel,
    // so do not multiply periodSamples by the number of channels.
    CSAMPLE freqSkip = 1.0 / periodSamples * 2.0 * M_PI;

    CSAMPLE feedback = m_pFeedbackParameter->value();
    CSAMPLE range = m_pRangeParameter->value();
    int stages = 2 * m_pStagesParameter->value();

    CSAMPLE* oldInLeft = pState->oldInLeft;
    CSAMPLE* oldOutLeft = pState->oldOutLeft;
    CSAMPLE* oldInRight = pState->oldInRight;
    CSAMPLE* oldOutRight = pState->oldOutRight;

    // Use separate filter coefficients for the left and right channels
    CSAMPLE filterCoefLeft = 0;
    CSAMPLE filterCoefRight = 0;

    CSAMPLE left = 0, right = 0;

    CSAMPLE_GAIN oldDepth = pState->oldDepth;
    const CSAMPLE_GAIN depthDelta = (depth - oldDepth)
            / bufferParameters.framesPerBuffer();
    const CSAMPLE_GAIN depthStart = oldDepth + depthDelta;

    int stereoCheck = m_pStereoParameter->value();
    int counter = 0;

    for (unsigned int i = 0;
            i < bufferParameters.samplesPerBuffer();
            i += bufferParameters.channelCount()) {
        left = pInput[i] + tanh(left * feedback);
        right = pInput[i + 1] + tanh(right * feedback);

        // When stereo is enabled, the right channel runs half a period out of
        // phase with the left
        pState->leftPhase = fmodf(pState->leftPhase + freqSkip, 2.0 * M_PI);
        pState->rightPhase = fmodf(pState->rightPhase + freqSkip + M_PI * stereoCheck, 2.0 * M_PI);

        // Update the filter coefficients only once every 'updateCoef' frames
        // to avoid unnecessary computation
        if ((counter++) % updateCoef == 0) {
            CSAMPLE delayLeft = 0.5 + 0.5 * sin(pState->leftPhase);
            CSAMPLE delayRight = 0.5 + 0.5 * sin(pState->rightPhase);

            // Coefficient computation based on the following:
            // https://ccrma.stanford.edu/~jos/pasp/Classic_Virtual_Analog_Phase.html
            CSAMPLE wLeft = range * delayLeft;
            CSAMPLE wRight = range * delayRight;

            CSAMPLE tanwLeft = tanh(wLeft / 2);
            CSAMPLE tanwRight = tanh(wRight / 2);

            filterCoefLeft = (1.0 - tanwLeft) / (1.0 + tanwLeft);
            filterCoefRight = (1.0 - tanwRight) / (1.0 + tanwRight);
        }

        left = processSample(left, oldInLeft, oldOutLeft, filterCoefLeft, stages);
        right = processSample(right, oldInRight, oldOutRight, filterCoefRight, stages);

        // Ramp the depth across the buffer to avoid zipper noise
        const CSAMPLE_GAIN depthRamped =
                depthStart + depthDelta * (i / bufferParameters.channelCount());

        // Mix the dry input with the processed (wet) sample
        pOutput[i] = pInput[i] * (1.0 - 0.5 * depthRamped) + left * depthRamped * 0.5;
        pOutput[i + 1] = pInput[i + 1] * (1.0 - 0.5 * depthRamped) + right * depthRamped * 0.5;
    }

    pState->oldDepth = depth;
}
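
Note: the per-stage filtering above is delegated to processSample(), which is not part of this listing. For orientation only, here is a minimal sketch of a first-order allpass cascade matching that call signature; the helper name and the exact update order are assumptions, and the real helper in Mixxx's phasereffect.h may differ.

// Hedged sketch only, not copied from Mixxx. CSAMPLE is Mixxx's float
// sample type. Each stage is the classic first-order allpass
// y[n] = a*x[n] + x[n-1] - a*y[n-1], which shifts phase without changing
// amplitude; mixed with the dry signal, the cascade produces the phaser
// notches.
CSAMPLE allpassCascadeSketch(CSAMPLE input,
                             CSAMPLE* oldIn, CSAMPLE* oldOut,
                             CSAMPLE coef, int stages) {
    for (int j = 0; j < stages; ++j) {
        const CSAMPLE output = coef * input + oldIn[j] - coef * oldOut[j];
        oldIn[j] = input;   // x[n-1] for this stage
        oldOut[j] = output; // y[n-1] for this stage
        input = output;     // feed the next stage
    }
    return input;
}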
Example #2
void AutoPanEffect::processChannel(
          const ChannelHandle& handle, AutoPanGroupState* pGroupState,
          const CSAMPLE* pInput, CSAMPLE* pOutput,
          const mixxx::EngineParameters& bufferParameters,
          const EffectEnableState enableState,
          const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    if (enableState == EffectEnableState::Disabled) {
        return;
    }

    AutoPanGroupState& gs = *pGroupState;
    double width = m_pWidthParameter->value();
    double period = m_pPeriodParameter->value();
    double smoothing = 0.5 - m_pSmoothingParameter->value();

    if (groupFeatures.has_beat_length_sec) {
        // period is a number of beats
        double beats = std::max(roundToFraction(period, 2), 0.25);
        period = beats * groupFeatures.beat_length_sec * bufferParameters.sampleRate();

        // TODO(xxx) sync phase
        //if (groupFeatures.has_beat_fraction) {

    } else {
        // period is a number of seconds
        period = std::max(period, 0.25) * bufferParameters.sampleRate();
    }

    // When the period changes, the pan position shouldn't jump, so the
    // elapsed time is rescaled to the new period
    if (gs.m_dPreviousPeriod != -1.0) {
        gs.time *= period / gs.m_dPreviousPeriod;
    }

    gs.m_dPreviousPeriod = period;

    if (gs.time >= period || enableState == EffectEnableState::Enabling) {
        gs.time = 0;
    }

    // Normally, the position goes from 0 to 1 linearly. Here we make steps at
    // 0.25 and 0.75 to hold the sound fully on the right or fully on the left.
    // In the end, the "position" value can describe a sinusoid or a square
    // wave depending on the size of those steps.

    // Slope coefficient of the ramp between two steps:
    // a = (y2 - y1) / (x2 - x1) = 1 / (1 - 2 * smoothing)
    float a = smoothing != 0.5f ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;

    // size of a segment of slope (controlled by the "smoothing" parameter)
    float u = (0.5f - smoothing) / 2.0f;

    gs.frac.setRampingThreshold(kPositionRampingThreshold);

    double sinusoid = 0;

    // NOTE: Assuming engine is working in stereo.
    for (unsigned int i = 0; i + 1 < bufferParameters.samplesPerBuffer(); i += 2) {

        CSAMPLE periodFraction = CSAMPLE(gs.time) / period;

        // current quarter in the trigonometric circle
        float quarter = floorf(periodFraction * 4.0f);

        // part of the period fraction being a step (not in the slope)
        CSAMPLE stepsFractionPart = floorf((quarter + 1.0f) / 2.0f) * smoothing;

        // float inInterval = fmod( periodFraction, (period / 2.0) );
        float inStepInterval = fmod(periodFraction, 0.5f);

        CSAMPLE angleFraction;
        if (inStepInterval > u && inStepInterval < (u + smoothing)) {
            // at full left or full right
            angleFraction = quarter < 2.0f ? 0.25f : 0.75f;
        } else {
            // in the slope (linear function)
            angleFraction = (periodFraction - stepsFractionPart) * a;
        }

        // Transform angleFraction into a sinusoid. The width parameter
        // modulates the two limits: if width is 0.5, the limits are 0.25 and
        // 0.75; if it is 0, both limits are 0.5, so the sound stays at the
        // center; if it is 1, the limits are 0 and 1 (full left and full
        // right).
        sinusoid = sin(M_PI * 2.0f * angleFraction) * width;
        gs.frac.setWithRampingApplied((sinusoid + 1.0f) / 2.0f);

        // apply the delay
        gs.delay->process(&pInput[i], &pOutput[i],
                -0.005 * math_clamp(((gs.frac * 2.0) - 1.0f), -1.0, 1.0) * bufferParameters.sampleRate());

        double lawCoef = computeLawCoefficient(sinusoid);
        pOutput[i] *= gs.frac * lawCoef;
        pOutput[i+1] *= (1.0f - gs.frac) * lawCoef;

        gs.time++;
        while (gs.time >= period) {
            // Click for debug
            //pOutput[i] = 1.0f;
            //pOutput[i+1] = 1.0f;

            // The while loop is required in case the period was just changed
            // and gs.time is more than one period ahead
            gs.time -= period;
        }
    }
}
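
Note: both examples above (and the flanger below) quantize the beat-synced period with roundToFraction(), which is defined elsewhere in Mixxx. A minimal sketch of that rounding behaviour follows, under the assumption that it simply rounds to the nearest multiple of 1/denominator; the sketch name is hypothetical.

#include <cmath>

// Hedged sketch only: rounds `value` to the nearest multiple of
// 1/denominator, e.g. roundToFractionSketch(1.3, 2) == 1.5. The real
// roundToFraction() in Mixxx may be implemented differently.
inline double roundToFractionSketch(double value, int denominator) {
    return std::round(value * denominator) / denominator;
}

With denominator 2, a 1.3-beat period becomes 1.5 beats, which the surrounding std::max() calls then clamp to a minimum of a quarter beat.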
Example #3
void FlangerEffect::processChannel(const ChannelHandle& handle,
                                   FlangerGroupState* pState,
                                   const CSAMPLE* pInput, CSAMPLE* pOutput,
                                   const mixxx::EngineParameters& bufferParameters,
                                   const EffectEnableState enableState,
                                   const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    double lfoPeriodParameter = m_pSpeedParameter->value();
    double lfoPeriodFrames;
    if (groupFeatures.has_beat_length_sec) {
        // lfoPeriodParameter is a number of beats
        lfoPeriodParameter = std::max(roundToFraction(lfoPeriodParameter, 2.0), kMinLfoBeats);
        if (m_pTripletParameter->toBool()) {
            lfoPeriodParameter /= 3.0;
        }
        lfoPeriodFrames = lfoPeriodParameter * groupFeatures.beat_length_sec
                * bufferParameters.sampleRate();
    } else {
        // lfoPeriodParameter is a number of seconds
        lfoPeriodFrames = std::max(lfoPeriodParameter, kMinLfoBeats)
                * bufferParameters.sampleRate();
    }

    // When the period changes, the LFO position shouldn't jump, so the
    // elapsed frame count is rescaled to the new period
    if (pState->previousPeriodFrames != -1.0) {
        pState->lfoFrames *= lfoPeriodFrames / pState->previousPeriodFrames;
    }
    pState->previousPeriodFrames = lfoPeriodFrames;

    // lfoPeriodFrames is used to calculate the delay for each channel
    // independently in the loop below, so do not multiply lfoPeriodFrames by
    // the number of channels.

    CSAMPLE_GAIN mix = m_pMixParameter->value();
    RampingValue<CSAMPLE_GAIN> mixRamped(
            pState->prev_mix, mix, bufferParameters.framesPerBuffer());
    pState->prev_mix = mix;

    CSAMPLE_GAIN regen = m_pRegenParameter->value();
    RampingValue<CSAMPLE_GAIN> regenRamped(
            pState->prev_regen, regen, bufferParameters.framesPerBuffer());
    pState->prev_regen = regen;

    // Width and Manual are limited together: Manual is clamped so that the
    // modulated delay (Manual +/- Width / 2) stays within kMaxLfoWidthMs / 2
    // of kCenterDelayMs
    double width = m_pWidthParameter->value();
    double manual = m_pManualParameter->value();
    double maxManual = kCenterDelayMs + (kMaxLfoWidthMs - width) / 2;
    double minManual = kCenterDelayMs - (kMaxLfoWidthMs - width) / 2;
    manual = math_clamp(manual, minManual, maxManual);

    RampingValue<double> widthRamped(
            pState->prev_width, width, bufferParameters.framesPerBuffer());
    pState->prev_width = width;

    RampingValue<double> manualRamped(
            pState->prev_manual, manual, bufferParameters.framesPerBuffer());
    pState->prev_manual = manual;

    CSAMPLE* delayLeft = pState->delayLeft;
    CSAMPLE* delayRight = pState->delayRight;

    for (unsigned int i = 0;
            i < bufferParameters.samplesPerBuffer();
            i += bufferParameters.channelCount()) {

        CSAMPLE_GAIN mix_ramped = mixRamped.getNext();
        CSAMPLE_GAIN regen_ramped = regenRamped.getNext();
        double width_ramped = widthRamped.getNext();
        double manual_ramped = manualRamped.getNext();

        pState->lfoFrames++;
        if (pState->lfoFrames >= lfoPeriodFrames) {
            pState->lfoFrames = 0;
        }

        float periodFraction = static_cast<float>(pState->lfoFrames) / lfoPeriodFrames;
        double delayMs = manual_ramped + width_ramped / 2 * sin(M_PI * 2.0f * periodFraction);
        double delayFrames = delayMs * bufferParameters.sampleRate() / 1000;

        SINT framePrev = (pState->delayPos - static_cast<SINT>(floor(delayFrames))
                + kBufferLenth) % kBufferLenth;
        SINT frameNext = (pState->delayPos - static_cast<SINT>(ceil(delayFrames))
                + kBufferLenth) % kBufferLenth;
        CSAMPLE prevLeft = delayLeft[framePrev];
        CSAMPLE nextLeft = delayLeft[frameNext];

        CSAMPLE prevRight = delayRight[framePrev];
        CSAMPLE nextRight = delayRight[frameNext];

        CSAMPLE frac = delayFrames - floorf(delayFrames);
        CSAMPLE delayedSampleLeft = prevLeft + frac * (nextLeft - prevLeft);
        CSAMPLE delayedSampleRight = prevRight + frac * (nextRight - prevRight);

        delayLeft[pState->delayPos] = tanh_approx(pInput[i] + regen_ramped * delayedSampleLeft);
        delayRight[pState->delayPos] = tanh_approx(pInput[i + 1] + regen_ramped * delayedSampleRight);

        pState->delayPos = (pState->delayPos + 1) % kBufferLenth;

        double gain = (1 - mix_ramped + kGainCorrection * mix_ramped);
        pOutput[i] = (pInput[i] + mix_ramped * delayedSampleLeft) / gain;
        pOutput[i + 1] = (pInput[i + 1] + mix_ramped * delayedSampleRight) / gain;
    }

    if (enableState == EffectEnableState::Disabling) {
        SampleUtil::clear(delayLeft, kBufferLenth);
        SampleUtil::clear(delayRight, kBufferLenth);
        pState->previousPeriodFrames = -1;
        pState->prev_regen = 0;
        pState->prev_mix = 0;
    }
}
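
Note: the flanger smooths Mix, Regen, Width and Manual with RampingValue<T> so each parameter glides from the previous buffer's value to the new one over framesPerBuffer() steps instead of stepping abruptly. Below is a minimal sketch under the assumption of a (start, target, step count) constructor order, as the call sites above suggest; the class name is hypothetical and Mixxx's actual util/rampingvalue.h may differ.

// Hedged sketch only, not the Mixxx implementation.
template<typename T>
class RampingValueSketch {
  public:
    RampingValueSketch(const T& start, const T& target, unsigned int steps)
            : m_value(start),
              m_increment((target - start) / static_cast<T>(steps)) {
    }

    // Returns the next interpolated value; after `steps` calls it has
    // reached the target.
    T getNext() {
        m_value += m_increment;
        return m_value;
    }

  private:
    T m_value;
    T m_increment;
};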