void GainFilter<DataType_>::process_impl_direct(int64_t size) const {
  // Direct (non-LUT) processing path: every input sample is scaled by the
  // threshold and run through the gain computation, one output port at a time.
  for (int channel = 0; channel < nb_output_ports; ++channel) {
    const DataType* ATK_RESTRICT in = converted_inputs[channel];
    DataType* ATK_RESTRICT out = outputs[channel];
    // Index-based walk over the block; in/out do not alias (ATK_RESTRICT).
    for (int64_t sample = 0; sample < size; ++sample) {
      out[sample] = computeGain(in[sample] * threshold);
    }
  }
}
void GainFilter<DataType_>::recomputeLUT() {
  // Rebuild the gain lookup table entry by entry. A pending resetRequest
  // (set by another call site while this runs) restarts the fill from the
  // beginning; the table slot and the loop index always stay in lockstep.
  auto* table = gainLUT.data();
  int index = 0;
  while (index < LUTsize) {
    if (resetRequest) {
      // Restart: clear the request and begin writing from slot 0 again.
      resetRequest = false;
      index = 0;
    }
    table[index] = computeGain(static_cast<DataType>(index) / LUTprecision);
    ++index;
  }
  // Signal that the (re)computation has finished.
  isRunning = false;
}
// Mix one source stream (streamToAdd) into this slave's accumulation buffer
// (_mixSamples) for the given listener. Depending on the stream's state the
// samples are spatialized through the per-pair HRTF, mixed manually (stereo /
// echo), or rendered silent to preserve the HRTF tail and avoid artifacts.
// - listenerNodeData: per-listener state; owns the listener-source HRTF map.
// - sourceNodeID: node that produced streamToAdd (HRTF map key).
// - listeningNodeStream: the listener's own avatar stream (position/orientation).
// - streamToAdd: the source stream being mixed in.
// - throttle: when true, render with 0.0f gain instead of skipping outright.
void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
                                const AvatarAudioStream& listeningNodeStream,
                                const PositionalAudioStream& streamToAdd, bool throttle) {
    ++stats.totalMixes;

    // to reduce artifacts we call the HRTF functor for every source, even if throttled or silent
    // this ensures the correct tail from last mixed block and the correct spatialization of next first block

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // clamp distance away from zero so downstream attenuation math is safe
    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, isEcho);
    // NOTE(review): listeningNodeStream is passed as BOTH arguments here; a
    // (listener, source) pair looks intended — confirm computeAzimuth's use of
    // its second parameter before changing.
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    const int HRTF_DATASET_INDEX = 1;

    // the last ring-buffer pop failed: the stream has no fresh frame this block
    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated.
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // call renderSilent with a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // zero-initialized once; shared across calls as the silent input
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }
            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd.isStereo()) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
            _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
        }
        ++stats.manualStereoMixes;
        return;
    }

    // echo sources are not passed through HRTF
    if (isEcho) {
        // duplicate each mono sample into both interleaved stereo channels
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
            auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            _mixSamples[i] += monoSample;
            _mixSamples[i + 1] += monoSample;
        }
        ++stats.manualEchoMixes;
        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    // copy the popped mono frame into the scratch buffer fed to the HRTF
    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // silent frame: keep the HRTF fed (tail continuity) without audible output
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // call renderSilent to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
        ++stats.hrtfSilentRenders;
        return;
    }

    if (throttle) {
        // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
        ++stats.hrtfThrottleRenders;
        return;
    }

    // normal path: spatialize the frame and accumulate into the mix
    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
    ++stats.hrtfRenders;
}