Example #1
Result SoundSourceWV::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(!m_wpc);
    char msg[80]; // holds a possible error message
    int openFlags = OPEN_WVC | OPEN_NORMALIZE;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        openFlags |= OPEN_2CH_MAX;
    }
    m_wpc = WavpackOpenFileInput(
            getLocalFileNameBytes().constData(), msg, openFlags, 0);
    if (!m_wpc) {
        qDebug() << "SSWV::open: failed to open file : " << msg;
        return ERR;
    }

    setChannelCount(WavpackGetReducedChannels(m_wpc));
    setFrameRate(WavpackGetSampleRate(m_wpc));
    setFrameCount(WavpackGetNumSamples(m_wpc));

    if (WavpackGetMode(m_wpc) & MODE_FLOAT) {
        m_sampleScaleFactor = CSAMPLE_PEAK;
    } else {
        const int bitsPerSample = WavpackGetBitsPerSample(m_wpc);
        const uint32_t wavpackPeakSampleValue = uint32_t(1)
                << (bitsPerSample - 1);
        m_sampleScaleFactor = CSAMPLE_PEAK / CSAMPLE(wavpackPeakSampleValue);
    }

    return OK;
}
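The scale factor computed above maps WavPack's signed integer samples onto the floating-point range: a sample at full scale, 2^(bitsPerSample-1), becomes CSAMPLE_PEAK. A minimal standalone sketch of the same arithmetic, assuming CSAMPLE is float and CSAMPLE_PEAK is 1.0f:

// Standalone sketch of the integer -> float scale factor above.
// Assumes CSAMPLE == float and CSAMPLE_PEAK == 1.0f.
#include <cstdint>
#include <cstdio>

int main() {
    const int bitsPerSample = 16;
    const uint32_t peakSampleValue = uint32_t(1) << (bitsPerSample - 1); // 32768
    const float sampleScaleFactor = 1.0f / float(peakSampleValue);
    const int32_t rawSample = 16384; // half of full scale
    std::printf("%f\n", rawSample * sampleScaleFactor); // prints 0.500000
    return 0;
}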
Example #2
SINT SoundSourceWV::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer) {
    if (sampleBuffer == nullptr) {
        // NOTE(uklotzde): The WavPack API does not provide any
        // functions for skipping samples in the audio stream, and
        // calling the API functions with a nullptr buffer is not
        // an option. Since we don't want to read samples into a
        // temporary buffer that would have to be allocated, we seek
        // to the position after the skipped samples instead.
        SINT curFrameIndexBefore = m_curFrameIndex;
        SINT curFrameIndexAfter = seekSampleFrame(m_curFrameIndex + numberOfFrames);
        DEBUG_ASSERT(curFrameIndexBefore <= curFrameIndexAfter);
        DEBUG_ASSERT(m_curFrameIndex == curFrameIndexAfter);
        return curFrameIndexAfter - curFrameIndexBefore;
    }
    static_assert(sizeof(CSAMPLE) == sizeof(int32_t),
            "CSAMPLE and int32_t must have the same size");
    SINT unpackCount = WavpackUnpackSamples(m_wpc,
            reinterpret_cast<int32_t*>(sampleBuffer), numberOfFrames);
    DEBUG_ASSERT(unpackCount >= 0);
    if (!(WavpackGetMode(m_wpc) & MODE_FLOAT)) {
        // signed integer -> float
        const SINT sampleCount = frames2samples(unpackCount);
        for (SINT i = 0; i < sampleCount; ++i) {
            const int32_t sampleValue =
                    reinterpret_cast<int32_t*>(sampleBuffer)[i];
            sampleBuffer[i] = CSAMPLE(sampleValue) * m_sampleScaleFactor;
        }
    }
    m_curFrameIndex += unpackCount;
    return unpackCount;
}
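The integer-to-float conversion above happens in place and relies on CSAMPLE and int32_t having the same size: each buffer slot is read as an int32_t and then overwritten with the scaled float. A self-contained sketch of that pattern (the raw values and the 16-bit scale are illustrative; it deliberately mirrors the reinterpret_cast idiom of the original):

// Sketch of the in-place int32 -> float conversion used above,
// mirroring the reinterpret_cast idiom of the original code.
#include <cstdint>
#include <cstdio>

int main() {
    static_assert(sizeof(float) == sizeof(int32_t),
            "float and int32_t must have the same size");
    float buffer[3];
    // Pretend the decoder wrote raw signed 16-bit values into the buffer.
    reinterpret_cast<int32_t*>(buffer)[0] = -32768;
    reinterpret_cast<int32_t*>(buffer)[1] = 0;
    reinterpret_cast<int32_t*>(buffer)[2] = 16384;
    const float scale = 1.0f / 32768.0f; // peak value for 16 bits
    for (int i = 0; i < 3; ++i) {
        // Read the slot as int32_t, then overwrite it with the float value.
        const int32_t sampleValue = reinterpret_cast<int32_t*>(buffer)[i];
        buffer[i] = sampleValue * scale;
    }
    std::printf("%f %f %f\n", buffer[0], buffer[1], buffer[2]); // -1 0 0.5
    return 0;
}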
Example #3
void FlangerEffect::processChannel(const ChannelHandle& handle,
                                   FlangerGroupState* pState,
                                   const CSAMPLE* pInput, CSAMPLE* pOutput,
                                   const unsigned int numSamples,
                                   const unsigned int sampleRate,
                                   const EffectProcessor::EnableState enableState,
                                   const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);
    Q_UNUSED(enableState);
    Q_UNUSED(groupFeatures);
    Q_UNUSED(sampleRate);
    CSAMPLE lfoPeriod = m_pPeriodParameter->value();
    CSAMPLE lfoDepth = m_pDepthParameter->value();
    // Unused in EngineFlanger
    // CSAMPLE lfoDelay = m_pDelayParameter ?
    //         m_pDelayParameter->value().toDouble() : 0.0f;

    // TODO(rryan) check ranges
    // period needs to be >=0
    // delay needs to be >=0
    // depth is ???

    CSAMPLE* delayLeft = pState->delayLeft;
    CSAMPLE* delayRight = pState->delayRight;

    const int kChannels = 2;
    for (unsigned int i = 0; i + 1 < numSamples; i += kChannels) {
        delayLeft[pState->delayPos] = pInput[i];
        delayRight[pState->delayPos] = pInput[i+1];

        pState->delayPos = (pState->delayPos + 1) % kMaxDelay;

        pState->time++;
        if (pState->time > lfoPeriod) {
            pState->time = 0;
        }

        CSAMPLE periodFraction = CSAMPLE(pState->time) / lfoPeriod;
        CSAMPLE delay = kAverageDelayLength + kLfoAmplitude * sin(M_PI * 2.0f * periodFraction);

        int framePrev = (pState->delayPos - int(delay) + kMaxDelay - 1) % kMaxDelay;
        int frameNext = (pState->delayPos - int(delay) + kMaxDelay    ) % kMaxDelay;
        CSAMPLE prevLeft = delayLeft[framePrev];
        CSAMPLE nextLeft = delayLeft[frameNext];

        CSAMPLE prevRight = delayRight[framePrev];
        CSAMPLE nextRight = delayRight[frameNext];

        CSAMPLE frac = delay - floorf(delay);
        CSAMPLE delayedSampleLeft = prevLeft + frac * (nextLeft - prevLeft);
        CSAMPLE delayedSampleRight = prevRight + frac * (nextRight - prevRight);

        pOutput[i] = pInput[i] + lfoDepth * delayedSampleLeft;
        pOutput[i+1] = pInput[i+1] + lfoDepth * delayedSampleRight;
    }
}
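The two taps framePrev/frameNext straddle the fractional delay, and the output is linearly interpolated between them. The same read can be isolated into a standalone helper; the buffer size, write position, and delay below are illustrative, and the arithmetic deliberately matches the loop above:

// Standalone sketch of the fractional delay-line read in the loop above:
// two adjacent taps straddle the non-integer delay and the result is
// linearly interpolated between them (assumes 0 <= delay < size - 1).
#include <cmath>
#include <cstdio>

float readFractionalDelay(const float* delayLine, int size,
        int writePos, float delay) {
    const int framePrev = (writePos - int(delay) + size - 1) % size;
    const int frameNext = (writePos - int(delay) + size) % size;
    const float frac = delay - std::floor(delay);
    // Same interpolation as above: prev + frac * (next - prev)
    return delayLine[framePrev]
            + frac * (delayLine[frameNext] - delayLine[framePrev]);
}

int main() {
    const float delayLine[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    std::printf("%f\n", readFractionalDelay(delayLine, 8, 0, 2.5f)); // 5.5
    return 0;
}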
Example #4
SINT SoundSourceWV::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer) {
    static_assert(sizeof(CSAMPLE) == sizeof(int32_t),
            "CSAMPLE and int32_t must have the same size");
    SINT unpackCount = WavpackUnpackSamples(m_wpc,
            reinterpret_cast<int32_t*>(sampleBuffer), numberOfFrames);
    if (!(WavpackGetMode(m_wpc) & MODE_FLOAT)) {
        // signed integer -> float
        const SINT sampleCount = frames2samples(unpackCount);
        for (SINT i = 0; i < sampleCount; ++i) {
            const int32_t sampleValue =
                    reinterpret_cast<int32_t*>(sampleBuffer)[i];
            sampleBuffer[i] = CSAMPLE(sampleValue) * m_sampleScaleFactor;
        }
    }
    return unpackCount;
}
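frames2samples() converts the unpacked frame count into an interleaved sample count. In Mixxx a frame holds one sample per channel, so the conversion is just a multiplication by the channel count; a trivial stand-in under that assumption:

// Hypothetical stand-in for frames2samples(): one frame carries one
// sample per channel in an interleaved buffer.
inline int frames2samples(int frames, int channelCount) {
    return frames * channelCount;
}
// e.g. frames2samples(512, 2) == 1024 interleaved samples for stereo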
Example #5
void SoundSourceFLAC::flacMetadata(const FLAC__StreamMetadata* metadata) {
    // https://xiph.org/flac/api/group__flac__stream__decoder.html#ga43e2329c15731c002ac4182a47990f85
    // "...one STREAMINFO block, followed by zero or more other metadata blocks."
    // "...by default the decoder only calls the metadata callback for the STREAMINFO block..."
    // "...always before the first audio frame (i.e. write callback)."
    switch (metadata->type) {
    case FLAC__METADATA_TYPE_STREAMINFO:
    {
        setChannelCount(metadata->data.stream_info.channels);
        setSampleRate(metadata->data.stream_info.sample_rate);
        initFrameIndexRangeOnce(
                IndexRange::forward(
                        0,
                        metadata->data.stream_info.total_samples));

        const unsigned bitsPerSample = metadata->data.stream_info.bits_per_sample;
        DEBUG_ASSERT(kBitsPerSampleDefault != bitsPerSample);
        if (kBitsPerSampleDefault == m_bitsPerSample) {
            // not set before
            m_bitsPerSample = bitsPerSample;
            m_sampleScaleFactor = CSAMPLE_PEAK
                    / CSAMPLE(FLAC__int32(1) << bitsPerSample);
        } else {
            // already set before -> check for consistency
            if (bitsPerSample != m_bitsPerSample) {
                kLogger.warning() << "Unexpected bits per sample:"
                        << bitsPerSample << " <> " << m_bitsPerSample;
            }
        }
        m_maxBlocksize = metadata->data.stream_info.max_blocksize;
        if (0 >= m_maxBlocksize) {
            kLogger.warning() << "Invalid max. blocksize" << m_maxBlocksize;
        }
        const SINT sampleBufferCapacity =
                m_maxBlocksize * channelCount();
        if (m_sampleBuffer.capacity() < sampleBufferCapacity) {
            m_sampleBuffer.adjustCapacity(sampleBufferCapacity);
        }
        break;
    }
    default:
        // Ignore all other metadata types
        break;
    }
}
Example #6
ReadableSampleFrames SoundSourceWV::readSampleFramesClamped(
        WritableSampleFrames writableSampleFrames) {

    const SINT firstFrameIndex = writableSampleFrames.frameIndexRange().start();

    if (m_curFrameIndex != firstFrameIndex) {
        if (WavpackSeekSample(m_wpc, firstFrameIndex)) {
            m_curFrameIndex = firstFrameIndex;
        } else {
            kLogger.warning()
                    << "Could not seek to first frame index"
                    << firstFrameIndex;
            m_curFrameIndex = WavpackGetSampleIndex(m_wpc);
            return ReadableSampleFrames(IndexRange::between(m_curFrameIndex, m_curFrameIndex));
        }
    }
    DEBUG_ASSERT(m_curFrameIndex == firstFrameIndex);

    const SINT numberOfFramesTotal = writableSampleFrames.frameLength();

    static_assert(sizeof(CSAMPLE) == sizeof(int32_t),
            "CSAMPLE and int32_t must have the same size");
    CSAMPLE* pOutputBuffer = writableSampleFrames.writableData();
    SINT unpackCount = WavpackUnpackSamples(m_wpc,
            reinterpret_cast<int32_t*>(pOutputBuffer), numberOfFramesTotal);
    DEBUG_ASSERT(unpackCount >= 0);
    DEBUG_ASSERT(unpackCount <= numberOfFramesTotal);
    if (!(WavpackGetMode(m_wpc) & MODE_FLOAT)) {
        // signed integer -> float
        const SINT sampleCount = frames2samples(unpackCount);
        for (SINT i = 0; i < sampleCount; ++i) {
            const int32_t sampleValue =
                    *reinterpret_cast<int32_t*>(pOutputBuffer);
            *pOutputBuffer++ = CSAMPLE(sampleValue) * m_sampleScaleFactor;
        }
    }
    const auto resultRange = IndexRange::forward(m_curFrameIndex, unpackCount);
    m_curFrameIndex += unpackCount;
    return ReadableSampleFrames(
            resultRange,
            SampleBuffer::ReadableSlice(
                    writableSampleFrames.writableData(),
                    frames2samples(unpackCount)));
}
Example #7
void AutoPanEffect::processChannel(
          const ChannelHandle& handle, AutoPanGroupState* pGroupState,
          const CSAMPLE* pInput, CSAMPLE* pOutput,
          const mixxx::EngineParameters& bufferParameters,
          const EffectEnableState enableState,
          const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    if (enableState == EffectEnableState::Disabled) {
        return;
    }

    AutoPanGroupState& gs = *pGroupState;
    double width = m_pWidthParameter->value();
    double period = m_pPeriodParameter->value();
    double smoothing = 0.5 - m_pSmoothingParameter->value();

    if (groupFeatures.has_beat_length_sec) {
        // period is a number of beats
        double beats = std::max(roundToFraction(period, 2), 0.25);
        period = beats * groupFeatures.beat_length_sec * bufferParameters.sampleRate();

        // TODO(xxx) sync phase
        //if (groupFeatures.has_beat_fraction) {

    } else {
        // period is a number of seconds
        period = std::max(period, 0.25) * bufferParameters.sampleRate();
    }

    // When the period changes, the position of the sound shouldn't jump,
    // so the elapsed time needs to be rescaled accordingly
    if (gs.m_dPreviousPeriod != -1.0) {
        gs.time *= period / gs.m_dPreviousPeriod;
    }

    gs.m_dPreviousPeriod = period;

    if (gs.time >= period || enableState == EffectEnableState::Enabling) {
        gs.time = 0;
    }

    // Normally, the position goes from 0 to 1 linearly. Here we make steps at
    // 0.25 and 0.75 to have the sound fully on the right or fully on the left.
    // At the end, the "position" value can describe a sinusoid or a square
    // curve depending on the size of those steps.

    // slope coefficient:
    // a = (y2 - y1) / (x2 - x1)
    //   = 1 / (1 - 2 * stepfrac)
    float a = smoothing != 0.5f ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;

    // size of a segment of slope (controlled by the "smoothing" parameter)
    float u = (0.5f - smoothing) / 2.0f;

    gs.frac.setRampingThreshold(kPositionRampingThreshold);

    double sinusoid = 0;

    // NOTE: Assuming the engine is working in stereo.
    for (unsigned int i = 0; i + 1 < bufferParameters.samplesPerBuffer(); i += 2) {

        CSAMPLE periodFraction = CSAMPLE(gs.time) / period;

        // current quarter in the trigonometric circle
        float quarter = floorf(periodFraction * 4.0f);

        // part of the period fraction being a step (not in the slope)
        CSAMPLE stepsFractionPart = floorf((quarter + 1.0f) / 2.0f) * smoothing;

        // float inInterval = fmod( periodFraction, (period / 2.0) );
        float inStepInterval = fmod(periodFraction, 0.5f);

        CSAMPLE angleFraction;
        if (inStepInterval > u && inStepInterval < (u + smoothing)) {
            // at full left or full right
            angleFraction = quarter < 2.0f ? 0.25f : 0.75f;
        } else {
            // in the slope (linear function)
            angleFraction = (periodFraction - stepsFractionPart) * a;
        }

        // transforms the angleFraction into a sinusoid.
        // The width parameter modulates the two limits. If width is 0.5,
        // the limits will be 0.25 and 0.75. If it is 0, both limits will
        // be 0.5, so the sound will be stuck at the center. If it is 1,
        // the limits will be 0 and 1 (full left and full right).
        sinusoid = sin(M_PI * 2.0f * angleFraction) * width;
        gs.frac.setWithRampingApplied((sinusoid + 1.0f) / 2.0f);

        // apply the delay
        gs.delay->process(&pInput[i], &pOutput[i],
                -0.005 * math_clamp(((gs.frac * 2.0) - 1.0f), -1.0, 1.0) * bufferParameters.sampleRate());

        double lawCoef = computeLawCoefficient(sinusoid);
        pOutput[i] *= gs.frac * lawCoef;
        pOutput[i+1] *= (1.0f - gs.frac) * lawCoef;

        gs.time++;
        while (gs.time >= period) {
            // Click for debug
            //pOutput[i] = 1.0f;
            //pOutput[i+1] = 1.0f;

            // The while loop is required because a period change may
            // leave gs.time more than one period ahead
            gs.time -= period;
        }
    }
}
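The phase-to-angle mapping inside the loop can be isolated: flat steps of width smoothing hold the pan at full left or full right, and linear segments of slope a connect them. A self-contained sketch of just that mapping, using the same arithmetic as above (smoothing is expected in [0, 0.5)):

// Standalone sketch of the piecewise phase -> angle mapping above.
#include <cmath>

float angleFromPhase(float periodFraction, float smoothing) {
    // slope coefficient: a = 1 / (1 - 2 * stepfrac)
    const float a = (smoothing != 0.5f)
            ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;
    // size of a slope segment
    const float u = (0.5f - smoothing) / 2.0f;
    const float quarter = std::floor(periodFraction * 4.0f);
    const float stepsFractionPart =
            std::floor((quarter + 1.0f) / 2.0f) * smoothing;
    const float inStepInterval = std::fmod(periodFraction, 0.5f);
    if (inStepInterval > u && inStepInterval < (u + smoothing)) {
        // held at full left or full right
        return quarter < 2.0f ? 0.25f : 0.75f;
    }
    // on a linear slope segment
    return (periodFraction - stepsFractionPart) * a;
}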
Example #8
void AutoPanEffect::processChannel(const ChannelHandle& handle, PanGroupState* pGroupState,
                              const CSAMPLE* pInput,
                              CSAMPLE* pOutput, const unsigned int numSamples,
                              const unsigned int sampleRate,
                              const EffectProcessor::EnableState enableState,
                              const GroupFeatureState& groupFeatures) {
    Q_UNUSED(handle);

    if (enableState == EffectProcessor::DISABLED) {
        return;
    }

    PanGroupState& gs = *pGroupState;
    double width = m_pWidthParameter->value();
    double period = m_pPeriodParameter->value();
    double periodUnit = m_pPeriodUnitParameter->value();
    double smoothing = 0.5 - m_pSmoothingParameter->value();

    // When the period knob is between max and max-1, the time is paused.
    // Time shouldn't be paused while in the enabling state, as the sound
    // would be stuck in the middle even if the smoothing is at max.
    bool timePaused = period > m_pPeriodParameter->maximum() - 1
            && enableState != EffectProcessor::ENABLING;

    if (periodUnit == 1 && groupFeatures.has_beat_length) {
        // floor the parameter to one of these values:
        // 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, 32, 64, 128

        int i = 0;
        while (period > m_pPeriodParameter->minimum()) {
            period /= 2;
            i++;
        }

        double beats = m_pPeriodParameter->minimum();
        while (i != 0) {
            beats *= 2;
            i--;
        }

        period = groupFeatures.beat_length * beats;
    } else {
        // period is a number of seconds (max 128 s); convert to samples
        period *= sampleRate;
    }

    // When the period changes, the position of the sound shouldn't jump,
    // so the elapsed time needs to be rescaled accordingly
    if (gs.m_dPreviousPeriod != -1.0) {
        gs.time *= period / gs.m_dPreviousPeriod;
    }
    gs.m_dPreviousPeriod = period;

    if (gs.time > period || enableState == EffectProcessor::ENABLING) {
        gs.time = 0;
    }

    // Normally, the position goes from 0 to 1 linearly. Here we make steps at
    // 0.25 and 0.75 to have the sound fully on the right or fully on the left.
    // At the end, the "position" value can describe a sinusoid or a square
    // curve depending on the size of those steps.

    // slope coefficient:
    // a = (y2 - y1) / (x2 - x1)
    //   = 1 / (1 - 2 * stepfrac)
    float a = smoothing != 0.5f ? 1.0f / (1.0f - smoothing * 2.0f) : 1.0f;

    // size of a segment of slope (controlled by the "smoothing" parameter)
    float u = (0.5f - smoothing) / 2.0f;

    gs.frac.setRampingThreshold(kPositionRampingThreshold);

    double sinusoid = 0;

    for (unsigned int i = 0; i + 1 < numSamples; i += 2) {

        CSAMPLE periodFraction = CSAMPLE(gs.time) / period;

        // current quarter in the trigonometric circle
        float quarter = floorf(periodFraction * 4.0f);

        // part of the period fraction being a step (not in the slope)
        CSAMPLE stepsFractionPart = floorf((quarter + 1.0f) / 2.0f) * smoothing;

        // float inInterval = fmod( periodFraction, (period / 2.0) );
        float inStepInterval = fmod(periodFraction, 0.5f);

        CSAMPLE angleFraction;
        if (inStepInterval > u && inStepInterval < (u + smoothing)) {
            // at full left or full right
            angleFraction = quarter < 2.0f ? 0.25f : 0.75f;
        } else {
            // in the slope (linear function)
            angleFraction = (periodFraction - stepsFractionPart) * a;
        }

        // transforms the angleFraction into a sinusoid.
        // The width parameter modulates the two limits. If width is 0.5,
        // the limits will be 0.25 and 0.75. If it is 0, both limits will
        // be 0.5, so the sound will be stuck at the center. If it is 1,
        // the limits will be 0 and 1 (full left and full right).
        sinusoid = sin(M_PI * 2.0f * angleFraction) * width;
        gs.frac.setWithRampingApplied((sinusoid + 1.0f) / 2.0f);

        // apply the delay
        gs.delay->process(&pInput[i], &pOutput[i],
                -0.005 * math_clamp(((gs.frac * 2.0) - 1.0f), -1.0, 1.0) * sampleRate);

        double lawCoef = computeLawCoefficient(sinusoid);
        pOutput[i] *= gs.frac * lawCoef;
        pOutput[i+1] *= (1.0f - gs.frac) * lawCoef;

        // The time shouldn't be paused if the position has not yet
        // reached its expected value due to ramping
        if (!timePaused || gs.frac.ramped) {
            gs.time++;
        }
    }
}
Example #9
void SoundSourceFLAC::flacMetadata(const FLAC__StreamMetadata* metadata) {
    // https://xiph.org/flac/api/group__flac__stream__decoder.html#ga43e2329c15731c002ac4182a47990f85
    // "...one STREAMINFO block, followed by zero or more other metadata blocks."
    // "...by default the decoder only calls the metadata callback for the STREAMINFO block..."
    // "...always before the first audio frame (i.e. write callback)."
    switch (metadata->type) {
    case FLAC__METADATA_TYPE_STREAMINFO:
    {
        const SINT channelCount = metadata->data.stream_info.channels;
        if (isValidChannelCount(channelCount)) {
            if (hasChannelCount()) {
                // already set before -> check for consistency
                if (getChannelCount() != channelCount) {
                    qWarning() << "Unexpected channel count:"
                            << channelCount << " <> " << getChannelCount();
                }
            } else {
                // not set before
                setChannelCount(channelCount);
            }
        } else {
            qWarning() << "Invalid channel count:"
                    << channelCount;
        }
        const SINT samplingRate = metadata->data.stream_info.sample_rate;
        if (isValidSamplingRate(samplingRate)) {
            if (hasSamplingRate()) {
                // already set before -> check for consistency
                if (getSamplingRate() != samplingRate) {
                    qWarning() << "Unexpected sampling rate:"
                            << samplingRate << " <> " << getSamplingRate();
                }
            } else {
                // not set before
                setSamplingRate(samplingRate);
            }
        } else {
            qWarning() << "Invalid sampling rate:"
                    << samplingRate;
        }
        const SINT frameCount = metadata->data.stream_info.total_samples;
        DEBUG_ASSERT(isValidFrameCount(frameCount));
        if (isEmpty()) {
            // not set before
            setFrameCount(frameCount);
        } else {
            // already set before -> check for consistency
            if (getFrameCount() != frameCount) {
                qWarning() << "Unexpected frame count:"
                        << frameCount << " <> " << getFrameCount();
            }
        }
        const unsigned bitsPerSample = metadata->data.stream_info.bits_per_sample;
        DEBUG_ASSERT(kBitsPerSampleDefault != bitsPerSample);
        if (kBitsPerSampleDefault == m_bitsPerSample) {
            // not set before
            m_bitsPerSample = bitsPerSample;
            m_sampleScaleFactor = CSAMPLE_PEAK
                    / CSAMPLE(FLAC__int32(1) << bitsPerSample);
        } else {
            // already set before -> check for consistency
            if (bitsPerSample != m_bitsPerSample) {
                qWarning() << "Unexpected bits per sample:"
                        << bitsPerSample << " <> " << m_bitsPerSample;
            }
        }
        m_maxBlocksize = metadata->data.stream_info.max_blocksize;
        if (0 >= m_maxBlocksize) {
            qWarning() << "Invalid max. blocksize" << m_maxBlocksize;
        }
        const SINT sampleBufferCapacity =
                m_maxBlocksize * getChannelCount();
        m_sampleBuffer.resetCapacity(sampleBufferCapacity);
        break;
    }
    default:
        // Ignore all other metadata types
        break;
    }
}