// construct delay line if necessary, reset filter state void FlangerNode::initFilter() { PRINT(("FlangerNode::initFilter()\n")); ASSERT(m_format.u.raw_audio.format != media_raw_audio_format::wildcard.format); if(!m_pDelayBuffer) { m_pDelayBuffer = new AudioBuffer( m_format.u.raw_audio, frames_for_duration( m_format.u.raw_audio, (bigtime_t)s_fMaxDelay*1000LL)); m_pDelayBuffer->zero(); } m_framesSent = 0; m_delayWriteFrame = 0; m_fTheta = 0.0; m_fThetaInc = calc_sweep_delta(m_format.u.raw_audio, m_fSweepRate); m_fSweepBase = calc_sweep_base(m_format.u.raw_audio, m_fDelay, m_fDepth); m_fSweepFactor = calc_sweep_factor(m_format.u.raw_audio, m_fDepth); // // PRINT(( // "\tFrames %ld\n" // "\tDelay %.2f\n" // "\tDepth %.2f\n" // "\tSweepBase %.2f\n" // "\tSweepFactor %.2f\n", // m_pDelayBuffer->frames(), // m_fDelay, m_fDepth, m_fSweepBase, m_fSweepFactor)); }
// Run one input buffer through the node's filter operation (m_op) and
// write the result into outputBuffer.
// The loop is shaped to process the buffer in segments split at parameter
// change events, but event lookup is not implemented yet (the "+++++"
// markers below), so nextEventTime is always endTime and a single segment
// covering the whole buffer is processed per iteration.
void AudioFilterNode::processBuffer(
	BBuffer* inputBuffer,
	BBuffer* outputBuffer) {

	ASSERT(inputBuffer);
	ASSERT(outputBuffer);
	ASSERT(m_op);

	// create wrapper objects
	AudioBuffer input(m_input.format.u.raw_audio, inputBuffer);
	AudioBuffer output(m_output.format.u.raw_audio, outputBuffer);

	// read/write cursors into input and output (source is fractional to
	// allow resampling-style ops)
	double sourceOffset = 0.0;
	uint32 destinationOffset = 0L;

	// when is the first frame due to be consumed?
	bigtime_t startTime = outputBuffer->Header()->start_time;
	// when is the next frame to be produced going to be consumed?
	bigtime_t targetTime = startTime;
	// when will the first frame of the next buffer be consumed?
	bigtime_t endTime = startTime + BufferDuration();

	uint32 framesRemaining = input.frames();
	while(framesRemaining) {
		// handle all events occurring before targetTime
		// +++++
		bigtime_t nextEventTime = endTime;

		// look for next event occurring before endTime
		// +++++

		// process up to found event, if any, or to end of buffer
		int64 toProcess = frames_for_duration(output.format(), nextEventTime - targetTime);
		ASSERT(toProcess > 0);
		uint32 processed = m_op->process(
			input, output, sourceOffset, destinationOffset, (uint32)toProcess, targetTime);
		if(processed < toProcess) {
			// +++++ in offline mode this will have to request additional buffer(s), right?
			PRINT((
				"*** AudioFilterNode::processBuffer(): insufficient frames filled\n"));
		}

		// NOTE(review): the consumed count here is toProcess, not
		// processed — if the op underruns, the shortfall is skipped
		// rather than retried; appears to be pending offline-mode
		// work per the comment above. Confirm before changing.
		if(toProcess > framesRemaining)
			framesRemaining = 0;
		else
			framesRemaining -= toProcess;

		// advance target time
		targetTime = nextEventTime; // +++++ might this drift from the real frame offset?
	}

	// NOTE(review): size_used mixes the *input* frame count with the
	// *output* bytes-per-frame — correct only while the filter produces
	// one output frame per input frame; verify for rate-changing ops.
	outputBuffer->Header()->size_used = input.frames() * bytes_per_frame(m_output.format.u.raw_audio);
//	PRINT(("### output size: %ld\n", outputBuffer->Header()->size_used));
}
// (Re)configure this input's mix buffer for the given output frame rate
// and output buffer size in frames. A framerate or frames of 0 means no
// output is connected: the mix buffer is torn down instead.
void
MixerInput::SetMixBufferFormat(int32 framerate, int32 frames)
{
	TRACE("MixerInput::SetMixBufferFormat: framerate %ld, frames %ld\n",
		framerate, frames);

	fMixBufferFrameRate = framerate;
	fDebugMixBufferFrames = frames;

	// frames and/or framerate can be 0 (if no output is connected)
	if (framerate == 0 || frames == 0) {
		// Release the mix buffer and detach every channel's base pointer.
		if (fMixBuffer != NULL) {
			rtm_free(fMixBuffer);
			fMixBuffer = NULL;
		}
		for (int i = 0; i < fInputChannelCount; i++)
			fInputChannelInfo[i].buffer_base = 0;
		fMixBufferFrameCount = 0;
		_UpdateInputChannelDestinationMask();
		_UpdateInputChannelDestinations();
		return;
	}

	// Choose fMixBufferFrameCount as an integral multiple of "frames"
	// that covers at least 3 input buffers and 2 output buffers of audio.
	bigtime_t inputBufferLength = duration_for_frames(
		fInput.format.u.raw_audio.frame_rate,
		frames_per_buffer(fInput.format.u.raw_audio));
	bigtime_t outputBufferLength = duration_for_frames(framerate, frames);
	bigtime_t mixerBufferLength
		= max_c(3 * inputBufferLength, 2 * outputBufferLength);
	int rawFrameCount = frames_for_duration(framerate, mixerBufferLength);
	fMixBufferFrameCount = ((rawFrameCount / frames) + 1) * frames;

	TRACE(" inputBufferLength %10Ld\n", inputBufferLength);
	TRACE(" outputBufferLength %10Ld\n", outputBufferLength);
	TRACE(" mixerBufferLength %10Ld\n", mixerBufferLength);
	TRACE(" fMixBufferFrameCount %10d\n", fMixBufferFrameCount);

	ASSERT((fMixBufferFrameCount % frames) == 0);

	// reset write-position bookkeeping
	fLastDataFrameWritten = -1;
	fFractionalFrames = 0.0;

	// Recreate the realtime pool and allocate a fresh, zeroed mix buffer.
	rtm_free(fMixBuffer);
	rtm_delete_pool(fRtmPool);
	int allocSize = sizeof(float) * fInputChannelCount * fMixBufferFrameCount;
	if (rtm_create_pool(&fRtmPool, allocSize) != B_OK)
		fRtmPool = NULL;
	fMixBuffer = (float*)rtm_alloc(fRtmPool, allocSize);
	if (fMixBuffer == NULL)
		return;
	memset(fMixBuffer, 0, allocSize);

	// Samples are interleaved per channel; channel i starts at float i.
	for (int i = 0; i < fInputChannelCount; i++)
		fInputChannelInfo[i].buffer_base = &fMixBuffer[i];

	_UpdateInputChannelDestinationMask();
	_UpdateInputChannelDestinations();
}
// Accept one incoming media buffer: resample its frames into this input's
// circular mix buffer at the position derived from the buffer's start
// time, compensating for small timing glitches against the previous
// buffer's end position. Updates fLastDataFrameWritten and
// fLastDataAvailableTime for the next call's glitch detection.
void MixerInput::BufferReceived(BBuffer* buffer)
{
	void* data;
	size_t size;
	bigtime_t start;
	bigtime_t buffer_duration;

	if (!fMixBuffer) {
		ERROR("MixerInput::BufferReceived: dropped incoming buffer as we "
			"don't have a mix buffer\n");
		return;
	}

	data = buffer->Data();
	size = buffer->SizeUsed();
	start = buffer->Header()->start_time;
	buffer_duration = duration_for_frames(fInput.format.u.raw_audio.frame_rate,
		size / bytes_per_frame(fInput.format.u.raw_audio));
	if (start < 0) {
		ERROR("MixerInput::BufferReceived: buffer with negative start time of "
			"%Ld dropped\n", start);
		return;
	}

	// swap the byte order of this buffer, if necessary
	if (fInputByteSwap)
		fInputByteSwap->Swap(data, size);

	// target write position in the circular mix buffer, in output frames
	int offset = frames_for_duration(fMixBufferFrameRate, start)
		% fMixBufferFrameCount;

	PRINT(4, "MixerInput::BufferReceived: buffer start %10Ld, offset %6d\n",
		start, offset);

	// Convert the input frame count to output frames at the mix rate,
	// carrying the fractional remainder across buffers so no frames are
	// lost over time.
	int in_frames = size / bytes_per_frame(fInput.format.u.raw_audio);
	double frames = ((double)in_frames * fMixBufferFrameRate)
		/ fInput.format.u.raw_audio.frame_rate;
	int out_frames = int(frames);
	fFractionalFrames += frames - double(out_frames);
	if (fFractionalFrames >= 1.0) {
		fFractionalFrames -= 1.0;
		out_frames++;
	}

	// if fLastDataFrameWritten != -1, then we have a valid last position
	// and can do glitch compensation
	if (fLastDataFrameWritten >= 0) {
		int expected_frame = (fLastDataFrameWritten + 1)
			% fMixBufferFrameCount;
		if (offset != expected_frame) {
			// due to rounding and other errors, offset might be off by +/- 1
			// this is not really a bad glitch, we just adjust the position
			if (offset == fLastDataFrameWritten) {
//				printf("MixerInput::BufferReceived: -1 frame GLITCH! last "
//					"frame was %ld, expected frame was %d, new frame is %d\n",
//					fLastDataFrameWritten, expected_frame, offset);
				offset = expected_frame;
			} else if (offset == ((fLastDataFrameWritten + 2)
					% fMixBufferFrameCount)) {
//				printf("MixerInput::BufferReceived: +1 frame GLITCH! last "
//					"frame was %ld, expected frame was %d, new frame is %d\n",
//					fLastDataFrameWritten, expected_frame, offset);
				offset = expected_frame;
			} else {
				// A real glitch: classify it by how far the buffer's start
				// time is from the end of the previously received data, and
				// stretch/shrink by one output frame for small (<10% of a
				// buffer) deviations.
				printf("MixerInput::BufferReceived: GLITCH! last frame was "
					"%4ld, expected frame was %4d, new frame is %4d\n",
					fLastDataFrameWritten, expected_frame, offset);
				if (start > fLastDataAvailableTime) {
					if ((start - fLastDataAvailableTime)
							< (buffer_duration / 10)) {
						// buffer is less than 10% of buffer duration too late
						printf("short glitch, buffer too late, time delta "
							"%Ld\n", start - fLastDataAvailableTime);
						offset = expected_frame;
						out_frames++;
					} else {
						// buffer more than 10% of buffer duration too late
						// TODO: zerofill buffer
						printf("MAJOR glitch, buffer too late, time delta "
							"%Ld\n", start - fLastDataAvailableTime);
					}
				} else {
					// start <= fLastDataAvailableTime
					// the new buffer is too early
					if ((fLastDataAvailableTime - start)
							< (buffer_duration / 10)) {
						// buffer is less than 10% of buffer duration too early
						printf("short glitch, buffer too early, time delta "
							"%Ld\n", fLastDataAvailableTime - start);
						offset = expected_frame;
						out_frames--;
						if (out_frames < 1)
							out_frames = 1;
					} else {
						// buffer more than 10% of buffer duration too early
						// TODO: zerofill buffer
						printf("MAJOR glitch, buffer too early, time delta "
							"%Ld\n", fLastDataAvailableTime - start);
					}
				}
			}
		}
	}

//	printf("data arrived for %10Ld to %10Ld, storing at frames %ld to %ld\n",
//		start,
//		start + duration_for_frames(fInput.format.u.raw_audio.frame_rate,
//		frames_per_buffer(fInput.format.u.raw_audio)), offset,
//		offset + out_frames);

	// If the data would run past the end of the circular mix buffer, split
	// the resample into a tail part (to the buffer end) and a head part
	// (wrapped to frame 0); otherwise store it in one contiguous run.
	if (offset + out_frames > fMixBufferFrameCount) {
		int out_frames1 = fMixBufferFrameCount - offset;
		int out_frames2 = out_frames - out_frames1;
		// split the input proportionally to the output split
		int in_frames1 = (out_frames1 * in_frames) / out_frames;
		int in_frames2 = in_frames - in_frames1;
//		printf("at %10Ld, data arrived for %10Ld to %10Ld, storing at "
//			"frames %ld to %ld and %ld to %ld\n", fCore->fTimeSource->Now(),
//			start,
//			start + duration_for_frames(fInput.format.u.raw_audio.frame_rate,
//			frames_per_buffer(fInput.format.u.raw_audio)), offset,
//			offset + out_frames1 - 1, 0, out_frames2 - 1);
		PRINT(3, "at %10Ld, data arrived for %10Ld to %10Ld, storing at "
			"frames %ld to %ld and %ld to %ld\n", fCore->fTimeSource->Now(),
			start,
			start + duration_for_frames(fInput.format.u.raw_audio.frame_rate,
			frames_per_buffer(fInput.format.u.raw_audio)), offset,
			offset + out_frames1 - 1, 0, out_frames2 - 1);
		PRINT(5, " in_frames %5d, out_frames %5d, in_frames1 %5d, "
			"out_frames1 %5d, in_frames2 %5d, out_frames2 %5d\n",
			in_frames, out_frames, in_frames1, out_frames1, in_frames2,
			out_frames2);
		fLastDataFrameWritten = out_frames2 - 1;

		// convert offset from frames into bytes
		offset *= sizeof(float) * fInputChannelCount;
		// Resample each input channel separately; samples in the mix
		// buffer are interleaved, hence the fInputChannelCount stride.
		for (int i = 0; i < fInputChannelCount; i++) {
			fResampler[i]->Resample(
				reinterpret_cast<char*>(data)
					+ i * bytes_per_sample(fInput.format.u.raw_audio),
				bytes_per_frame(fInput.format.u.raw_audio), in_frames1,
				reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base)
					+ offset, fInputChannelCount * sizeof(float), out_frames1,
				fInputChannelInfo[i].gain);
			fResampler[i]->Resample(
				reinterpret_cast<char*>(data)
					+ i * bytes_per_sample(fInput.format.u.raw_audio)
					+ in_frames1 * bytes_per_frame(fInput.format.u.raw_audio),
				bytes_per_frame(fInput.format.u.raw_audio), in_frames2,
				reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base),
				fInputChannelCount * sizeof(float), out_frames2,
				fInputChannelInfo[i].gain);
		}
	} else {
//		printf("at %10Ld, data arrived for %10Ld to %10Ld, storing at "
//			"frames %ld to %ld\n", fCore->fTimeSource->Now(), start,
//			start + duration_for_frames(fInput.format.u.raw_audio.frame_rate,
//			frames_per_buffer(fInput.format.u.raw_audio)), offset,
//			offset + out_frames - 1);
		PRINT(3, "at %10Ld, data arrived for %10Ld to %10Ld, storing at "
			"frames %ld to %ld\n", fCore->fTimeSource->Now(), start,
			start + duration_for_frames(fInput.format.u.raw_audio.frame_rate,
			frames_per_buffer(fInput.format.u.raw_audio)), offset,
			offset + out_frames - 1);
		PRINT(5, " in_frames %5d, out_frames %5d\n", in_frames, out_frames);
		fLastDataFrameWritten = offset + out_frames - 1;

		// convert offset from frames into bytes
		offset *= sizeof(float) * fInputChannelCount;
		for (int i = 0; i < fInputChannelCount; i++) {
			fResampler[i]->Resample(
				reinterpret_cast<char*>(data)
					+ i * bytes_per_sample(fInput.format.u.raw_audio),
				bytes_per_frame(fInput.format.u.raw_audio), in_frames,
				reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base)
					+ offset, fInputChannelCount * sizeof(float), out_frames,
				fInputChannelInfo[i].gain);
		}
	}
	// remember where this buffer's data ends, for the next glitch check
	fLastDataAvailableTime = start + buffer_duration;
}
// Main mixer thread loop: once per output buffer period, gathers channel
// data from every MixerInput, mixes it (with per-source gains) into
// fMixBuffer, resamples that into a freshly requested BBuffer and sends it
// downstream. Runs until a locking or semaphore failure causes return.
void
MixerCore::_MixThread()
{
	// The broken BeOS R5 multiaudio node starts with time 0,
	// then publishes negative times for about 50ms, publishes 0
	// again until it finally reaches time values > 0
	if (!LockFromMixThread())
		return;
	bigtime_t start = fTimeSource->Now();
	Unlock();
	while (start <= 0) {
		TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n",
			start);
		snooze(5000);
		if (!LockFromMixThread())
			return;
		start = fTimeSource->Now();
		Unlock();
	}

	if (!LockFromMixThread())
		return;
	// scheduling latency: at least 3.6ms, or 40% of one output buffer
	bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio)));

	// TODO: when the format changes while running, everything is wrong!
	bigtime_t bufferRequestTimeout = buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio) / 2;

	TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and "
		"downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency,
		fDownstreamLatency, bufferRequestTimeout);

	// We must read from the input buffer at a position (pos) that is always
	// a multiple of fMixBufferFrameCount.
	int64 temp = frames_for_duration(fMixBufferFrameRate, start);
	int64 frameBase = ((temp / fMixBufferFrameCount) + 1)
		* fMixBufferFrameCount;
	bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase);
	Unlock();

	TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, "
		"frameBase %Ld\n", start, timeBase, frameBase);

	ASSERT(fMixBufferFrameCount > 0);

#if DEBUG
	uint64 bufferIndex = 0;
#endif

	// per-channel-type gather lists, refilled and emptied every iteration
	typedef RtList<chan_info> chan_info_list;
	chan_info_list inputChanInfos[MAX_CHANNEL_TYPES];
	BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount);
		// TODO: this does not support changing output channel count

	bigtime_t eventTime = timeBase;
	int64 framePos = 0;
	for (;;) {
		// Sleep until (real time of eventTime) minus our latencies; the
		// semaphore doubles as a wakeup/quit signal.
		if (!LockFromMixThread())
			return;
		bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0)
			- latency - fDownstreamLatency;
		Unlock();
		status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT,
			waitUntil);
		if (rv == B_INTERRUPTED)
			continue;
		if (rv != B_TIMED_OUT && rv < B_OK)
			return;

		if (!LockWithTimeout(10000)) {
			ERROR("MixerCore: LockWithTimeout failed\n");
			continue;
		}

		// no inputs or output muted, skip further processing and just send an
		// empty buffer
		if (fInputs->IsEmpty() || fOutput->IsMuted()) {
			int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			BBuffer* buffer = fBufferGroup->RequestBuffer(size,
				bufferRequestTimeout);
			if (buffer != NULL) {
				memset(buffer->Data(), 0, size);
				// fill in the buffer header
				media_header* hdr = buffer->Header();
				hdr->type = B_MEDIA_RAW_AUDIO;
				hdr->size_used = size;
				hdr->time_source = fTimeSource->ID();
				hdr->start_time = eventTime;
				if (fNode->SendBuffer(buffer, fOutput) != B_OK) {
#if DEBUG
					ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
						bufferIndex);
#else
					ERROR("MixerCore: SendBuffer failed\n");
#endif
					buffer->Recycle();
				}
			} else {
#if DEBUG
				ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: RequestBuffer failed\n");
#endif
			}
			goto schedule_next_event;
		}

		int64 currentFramePos;
		currentFramePos = frameBase + framePos;

		// mix all data from all inputs into the mix buffer
		ASSERT(currentFramePos % fMixBufferFrameCount == 0);

		PRINT(4, "create new buffer event at %Ld, reading input frames at "
			"%Ld\n", eventTime, currentFramePos);

		// Init the channel information for each MixerInput.
		for (int i = 0; MixerInput* input = Input(i); i++) {
			int count = input->GetMixerChannelCount();
			for (int channel = 0; channel < count; channel++) {
				int type;
				const float* base;
				uint32 sampleOffset;
				float gain;
				if (!input->GetMixerChannelInfo(channel, currentFramePos,
						eventTime, &base, &sampleOffset, &type, &gain)) {
					continue;
				}
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				chan_info* info = inputChanInfos[type].Create();
				info->base = (const char*)base;
				info->sample_offset = sampleOffset;
				info->gain = gain;
			}
		}

		// For each output channel, collect all contributing sources
		// (input-channel gain times output-source gain) into mixChanInfos.
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			int sourceCount = fOutput->GetOutputChannelSourceCount(channel);
			for (int i = 0; i < sourceCount; i++) {
				int type;
				float gain;
				fOutput->GetOutputChannelSourceInfoAt(channel, i, &type,
					&gain);
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				int count = inputChanInfos[type].CountItems();
				for (int j = 0; j < count; j++) {
					chan_info* info = inputChanInfos[type].ItemAt(j);
					chan_info* newInfo = mixChanInfos[channel].Create();
					newInfo->base = info->base;
					newInfo->sample_offset = info->sample_offset;
					newInfo->gain = info->gain * gain;
				}
			}
		}

		// accumulate every source into the (zeroed) interleaved mix buffer
		memset(fMixBuffer, 0,
			fMixBufferChannelCount * fMixBufferFrameCount * sizeof(float));
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			PRINT(5, "_MixThread: channel %d has %d sources\n", channel,
				mixChanInfos[channel].CountItems());
			int count = mixChanInfos[channel].CountItems();
			for (int i = 0; i < count; i++) {
				chan_info* info = mixChanInfos[channel].ItemAt(i);
				PRINT(5, "_MixThread: base %p, sample-offset %2d, gain %.3f\n",
					info->base, info->sample_offset, info->gain);
				// This looks slightly ugly, but the current GCC will generate
				// the fastest code this way.
				// fMixBufferFrameCount is always > 0.
				uint32 dstSampleOffset = fMixBufferChannelCount
					* sizeof(float);
				uint32 srcSampleOffset = info->sample_offset;
				register char* dst = (char*)&fMixBuffer[channel];
				register char* src = (char*)info->base;
				register float gain = info->gain;
				register int j = fMixBufferFrameCount;
				do {
					*(float*)dst += *(const float*)src * gain;
					dst += dstSampleOffset;
					src += srcSampleOffset;
				} while (--j);
			}
		}

		// request a buffer
		BBuffer* buffer;
		buffer = fBufferGroup->RequestBuffer(
			fOutput->MediaOutput().format.u.raw_audio.buffer_size,
			bufferRequestTimeout);
		if (buffer != NULL) {
			// copy data from mix buffer into output buffer
			for (int i = 0; i < fMixBufferChannelCount; i++) {
				fResampler[i]->Resample(
					reinterpret_cast<char*>(fMixBuffer) + i * sizeof(float),
					fMixBufferChannelCount * sizeof(float),
					fMixBufferFrameCount,
					reinterpret_cast<char*>(buffer->Data())
						+ (i * bytes_per_sample(
							fOutput->MediaOutput().format.u.raw_audio)),
					bytes_per_frame(fOutput->MediaOutput().format.u.raw_audio),
					frames_per_buffer(
						fOutput->MediaOutput().format.u.raw_audio),
					fOutputGain * fOutput->GetOutputChannelGain(i));
			}
			PRINT(4, "send buffer, inframes %ld, outframes %ld\n",
				fMixBufferFrameCount,
				frames_per_buffer(fOutput->MediaOutput().format.u.raw_audio));

			// fill in the buffer header
			media_header* hdr = buffer->Header();
			hdr->type = B_MEDIA_RAW_AUDIO;
			hdr->size_used
				= fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			hdr->time_source = fTimeSource->ID();
			hdr->start_time = eventTime;

			// swap byte order if necessary
			fOutput->AdjustByteOrder(buffer);

			// send the buffer
			status_t res = fNode->SendBuffer(buffer, fOutput);
			if (res != B_OK) {
#if DEBUG
				ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: SendBuffer failed\n");
#endif
				buffer->Recycle();
			}
		} else {
#if DEBUG
			ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
				bufferIndex);
#else
			ERROR("MixerCore: RequestBuffer failed\n");
#endif
		}

		// make all lists empty
		for (int i = 0; i < MAX_CHANNEL_TYPES; i++)
			inputChanInfos[i].MakeEmpty();
		for (int i = 0; i < fOutput->GetOutputChannelCount(); i++)
			mixChanInfos[i].MakeEmpty();

schedule_next_event:
		// schedule next event
		framePos += fMixBufferFrameCount;
		eventTime = timeBase + bigtime_t((1000000LL * framePos)
			/ fMixBufferFrameRate);
		Unlock();
#if DEBUG
		bufferIndex++;
#endif
	}
}