void DspLimiter::Process(DspChunk& chunk)
{
    if (chunk.IsEmpty())
        return;

    const DspFormat format = chunk.GetFormat();

    // The limiter only runs in exclusive mode on floating-point data.
    if (!m_exclusive || (format != DspFormat::Float && format != DspFormat::Double))
    {
        m_active = false;
        return;
    }

    m_active = true;

    // Measure the loudest sample in the chunk.
    float peak;
    if (format == DspFormat::Double)
    {
        double widePeak = GetPeak((double*)chunk.GetData(), chunk.GetSampleCount());
        // Round the narrowed value toward the double peak so we never under-report it.
        peak = std::nexttoward((float)widePeak, widePeak);
    }
    else
    {
        assert(format == DspFormat::Float);
        peak = GetPeak((float*)chunk.GetData(), chunk.GetSampleCount());
    }

    // (Re)arm the limiter whenever clipping would occur.
    if (peak > 1.0f)
    {
        if (m_holdWindow <= 0)
        {
            NewTreshold(std::max(peak, 1.4f));
        }
        else if (peak > m_peak)
        {
            NewTreshold(peak);
        }

        m_holdWindow = (int64_t)m_rate * m_channels * 10; // 10 seconds
    }

    // Attenuate while the hold window is open.
    if (m_holdWindow > 0)
    {
        if (format == DspFormat::Double)
        {
            ApplyLimiter<double>((double*)chunk.GetData(), chunk.GetSampleCount(), m_threshold);
        }
        else
        {
            assert(format == DspFormat::Float);
            ApplyLimiter((float*)chunk.GetData(), chunk.GetSampleCount(), m_threshold);
        }

        m_holdWindow -= chunk.GetSampleCount();
    }
}
void AudioDevicePush::PushChunkToDevice(DspChunk& chunk, CAMEvent* pFilledEvent)
{
    // Ask the device how much of its buffer is currently occupied.
    UINT32 padding;
    ThrowIfFailed(m_backend->audioClient->GetCurrentPadding(&padding));

    // We can write no more than the free device space, and no more than the chunk holds.
    const UINT32 writable = std::min(m_backend->deviceBufferSize - padding, (UINT32)chunk.GetFrameCount());

    if (writable == 0)
        return;

    // Copy the frames into the device buffer.
    BYTE* deviceBuffer;
    ThrowIfFailed(m_backend->audioRenderClient->GetBuffer(writable, &deviceBuffer));

    assert(chunk.GetFrameSize() == (m_backend->waveFormat->wBitsPerSample / 8 * m_backend->waveFormat->nChannels));
    memcpy(deviceBuffer, chunk.GetData(), writable * chunk.GetFrameSize());

    ThrowIfFailed(m_backend->audioRenderClient->ReleaseBuffer(writable, 0));

    // Notify the caller (when requested) that the device buffer is now completely full.
    if (pFilledEvent && padding + writable == m_backend->deviceBufferSize)
        pFilledEvent->Set();

    // Drop the consumed frames from the head of the chunk.
    assert(writable <= chunk.GetFrameCount());
    chunk.ShrinkHead(chunk.GetFrameCount() - writable);

    m_pushedFrames += writable;
}
void DspTempo::Finish(DspChunk& chunk)
{
    // Drain SoundTouch's internal pipeline, appending everything it still
    // holds to the end of the (already processed) chunk.
    if (!m_active)
        return;

    Process(chunk);

    // BUGFIX: flush exactly once. The original code called flush() a second
    // time right before receiveSamples(); each flush feeds extra silence into
    // the pipeline, so the double flush could leave stray samples queued
    // after the drain.
    m_stouch.flush();
    uint32_t undone = m_stouch.numSamples();

    if (undone > 0)
    {
        // New buffer large enough for the processed data plus the tail.
        DspChunk output(DspFormat::Float, m_channels, chunk.GetFrameCount() + undone, m_rate);

        if (!chunk.IsEmpty())
            memcpy(output.GetData(), chunk.GetData(), chunk.GetSize());

        // Append the flushed tail right after the existing samples.
        uint32_t done = m_stouch.receiveSamples((float*)output.GetData() + chunk.GetSampleCount(), undone);
        assert(done == undone);
        output.ShrinkTail(chunk.GetFrameCount() + done);

        chunk = std::move(output);
    }
}
void DspTempo::Process(DspChunk& chunk)
{
    if (!m_active || chunk.IsEmpty())
        return;

    assert(chunk.GetRate() == m_rate);
    assert(chunk.GetChannelCount() == m_channels);

    // DirectShow speed is in double precision, SoundTouch operates in single.
    // We have to adjust it dynamically.
    AdjustTempo();

    DspChunk::ToFloat(chunk);

    // Feed the whole chunk in, then pull out whatever is ready.
    m_stouch.putSamples((const float*)chunk.GetData(), (uint32_t)chunk.GetFrameCount());

    DspChunk processed(DspFormat::Float, m_channels, m_stouch.numSamples(), m_rate);
    uint32_t received = m_stouch.receiveSamples((float*)processed.GetData(), (uint32_t)processed.GetFrameCount());
    assert(received == processed.GetFrameCount());
    processed.ShrinkTail(received);

    // Track output per tempo so the two-point tempo estimate stays accurate.
    auto& tally = (m_ftempo == m_ftempo1) ? m_outSamples1 : m_outSamples2;
    tally += received;

    chunk = std::move(processed);
}
void DspBalance::Process(DspChunk& chunk)
{
    // Apply stereo balance by attenuating the channel opposite to the pan
    // direction: negative balance pans left (right channel attenuated),
    // positive balance pans right (left channel attenuated).
    const float balance = m_renderer.GetBalance();
    assert(balance >= -1.0f && balance <= 1.0f);

    // Nothing to do when centered, empty, or not plain stereo.
    if (balance == 0.0f || chunk.IsEmpty() || chunk.GetChannelCount() != 2)
        return;

    DspChunk::ToFloat(chunk);

    auto data = reinterpret_cast<float*>(chunk.GetData());

    // BUGFIX: the gain must fall from 1 (centered) to 0 (fully panned).
    // The original used abs(balance) directly, which inverted the control —
    // full pan applied no attenuation while near-center almost muted the channel.
    const float gain = 1.0f - std::abs(balance);

    // Interleaved stereo: even indices are left, odd indices are right.
    for (size_t i = (balance < 0.0f ? 1 : 0), n = chunk.GetSampleCount(); i < n; i += 2)
        data[i] *= gain;
}
DspChunk DspRate::ProcessChunk(soxr_t soxr, DspChunk& chunk)
{
    assert(soxr);
    assert(!chunk.IsEmpty());
    assert(chunk.GetRate() == m_inputRate);
    assert(chunk.GetChannelCount() == m_channels);

    DspChunk::ToFloat(chunk);

    // Allocate twice the theoretical output size as headroom for the resampler.
    const size_t maxOutputFrames = (size_t)(2 * (uint64_t)chunk.GetFrameCount() * m_outputRate / m_inputRate);
    DspChunk resampled(DspFormat::Float, chunk.GetChannelCount(), maxOutputFrames, m_outputRate);

    size_t consumed = 0;
    size_t produced = 0;
    soxr_process(soxr, chunk.GetData(), chunk.GetFrameCount(), &consumed,
                 resampled.GetData(), resampled.GetFrameCount(), &produced);

    // soxr should always swallow the entire input in one pass here.
    assert(consumed == chunk.GetFrameCount());

    // Trim the buffer down to what was actually produced.
    resampled.ShrinkTail(produced);

    return resampled;
}
void AudioRenderer::ApplyRateCorrection(DspChunk& chunk)
{
    // Keeps audio playback in sync by dropping frames (live sources) or by
    // padding/cropping plus variable-rate resampling (external graph clock).
    CAutoLock objectLock(this);
    assert(m_device);
    assert(!m_device->IsBitstream());
    assert(m_state == State_Running);

    if (chunk.IsEmpty())
        return;

    // Time this chunk plus the device pipeline represents, with a 10ms cushion.
    const REFERENCE_TIME latency = llMulDiv(chunk.GetFrameCount(), OneSecond, chunk.GetRate(), 0) +
                                   m_device->GetStreamLatency() + OneMillisecond * 10;

    // Time worth of audio already queued but not yet played by the device.
    const REFERENCE_TIME remaining = m_device->GetEnd() - m_device->GetPosition();

    // CLEANUP: removed unused local `REFERENCE_TIME deltaTime = 0;` — it was
    // never read or written anywhere in this function.

    if (m_live)
    {
        // Rate matching: a live source cannot be slowed down, so when we are
        // buffering more than the target latency, drop the excess frames.
        if (remaining > latency)
        {
            size_t dropFrames = (size_t)llMulDiv(m_device->GetWaveFormat()->nSamplesPerSec,
                                                 remaining - latency, OneSecond, 0);

            dropFrames = std::min(dropFrames, chunk.GetFrameCount());

            chunk.ShrinkHead(chunk.GetFrameCount() - dropFrames);

            DebugOut("AudioRenderer drop", dropFrames, "frames for rate matching");
        }
    }
    else
    {
        // Clock matching: slave our audio clock to the graph clock.
        assert(m_externalClock);

        REFERENCE_TIME graphTime, myTime, myStartTime;
        if (SUCCEEDED(m_myClock.GetAudioClockStartTime(&myStartTime)) &&
            SUCCEEDED(m_myClock.GetAudioClockTime(&myTime, nullptr)) &&
            SUCCEEDED(m_graphClock->GetTime(&graphTime)) &&
            myTime > myStartTime)
        {
            // Exclude inserted silence from the comparison.
            myTime -= m_device->GetSilence();

            if (myTime > graphTime)
            {
                // Audio clock is ahead: pad and adjust backwards.
                REFERENCE_TIME padTime = myTime - graphTime;
                assert(padTime >= 0);

                size_t padFrames = (size_t)llMulDiv(m_device->GetWaveFormat()->nSamplesPerSec,
                                                    padTime, OneSecond, 0);

                // Only insert bulk silence above a ~30ms threshold; smaller
                // discrepancies are handled by the variable-rate DSP below.
                if (padFrames > m_device->GetWaveFormat()->nSamplesPerSec / 33) // ~30ms threshold
                {
                    DspChunk tempChunk(chunk.GetFormat(), chunk.GetChannelCount(),
                                       chunk.GetFrameCount() + padFrames, chunk.GetRate());

                    // Silence first, then the original data after it.
                    size_t padBytes = tempChunk.GetFrameSize() * padFrames;
                    ZeroMemory(tempChunk.GetData(), padBytes);
                    memcpy(tempChunk.GetData() + padBytes, chunk.GetData(), chunk.GetSize());

                    chunk = std::move(tempChunk);

                    // Account for the padded time on the slaved clock.
                    REFERENCE_TIME paddedTime = llMulDiv(padFrames, OneSecond,
                                                         m_device->GetWaveFormat()->nSamplesPerSec, 0);
                    m_myClock.OffsetSlavedClock(-paddedTime);
                    padTime -= paddedTime;
                    assert(padTime >= 0);

                    DebugOut("AudioRenderer pad", paddedTime / 10000., "ms for clock matching at",
                             m_sampleCorrection.GetLastFrameEnd() / 10000., "frame position");
                }

                // Correct the rest with variable rate.
                m_dspRealtimeRate.Adjust(padTime);
                m_myClock.OffsetSlavedClock(-padTime);
            }
            else if (remaining > latency)
            {
                // Audio clock is behind and we have excess buffer:
                // crop and adjust forwards.
                assert(myTime <= graphTime);
                REFERENCE_TIME dropTime = std::min(graphTime - myTime, remaining - latency);
                assert(dropTime >= 0);

                size_t dropFrames = (size_t)llMulDiv(m_device->GetWaveFormat()->nSamplesPerSec,
                                                     dropTime, OneSecond, 0);

                dropFrames = std::min(dropFrames, chunk.GetFrameCount());

                // Same ~30ms threshold as the pad branch.
                if (dropFrames > m_device->GetWaveFormat()->nSamplesPerSec / 33) // ~30ms threshold
                {
                    chunk.ShrinkHead(chunk.GetFrameCount() - dropFrames);

                    REFERENCE_TIME droppedTime = llMulDiv(dropFrames, OneSecond,
                                                          m_device->GetWaveFormat()->nSamplesPerSec, 0);
                    m_myClock.OffsetSlavedClock(droppedTime);
                    dropTime -= droppedTime;
                    assert(dropTime >= 0);

                    DebugOut("AudioRenderer drop", droppedTime / 10000., "ms for clock matching at",
                             m_sampleCorrection.GetLastFrameEnd() / 10000., "frame position");
                }

                // Correct the rest with variable rate.
                m_dspRealtimeRate.Adjust(-dropTime);
                m_myClock.OffsetSlavedClock(dropTime);
            }
        }
    }
}