/*!	Allocates the node's BBufferGroup, sized to span the downstream latency
	plus one spare buffer (minimum of three buffers).

	Guards against a zero buffer duration (which would otherwise cause an
	integer division by zero) and calls InitCheck() only once.

	\return The BBufferGroup's InitCheck() result.
*/
status_t
SoundPlayNode::AllocateBuffers()
{
	CALLED();

	// allocate enough buffers to span our downstream latency, plus one
	size_t size = fOutput.format.u.raw_audio.buffer_size;
	bigtime_t duration = BufferDuration();
	// Avoid dividing by zero if no buffer duration has been set yet;
	// the minimum-count clamp below supplies a sane default.
	int32 count = duration > 0 ? int32(fLatency / duration + 1 + 1) : 0;

	TRACE("SoundPlayNode::AllocateBuffers: latency = %Ld, buffer duration "
		"= %Ld, count %ld\n", fLatency, duration, count);

	if (count < 3)
		count = 3;

	TRACE("SoundPlayNode::AllocateBuffers: creating group of %ld buffers, "
		"size = %lu\n", count, size);

	fBufferGroup = new BBufferGroup(size, count);
	// Cache the status instead of calling InitCheck() twice.
	status_t status = fBufferGroup->InitCheck();
	if (status != B_OK) {
		ERROR("SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
			"failed\n");
	}
	return status;
}
void ToneProducer::AllocateBuffers() { FPRINTF(stderr, "ToneProducer::AllocateBuffers\n"); // allocate enough buffers to span our downstream latency, plus one size_t size = mOutput.format.u.raw_audio.buffer_size; int32 count = int32(mLatency / BufferDuration() + 1 + 1); FPRINTF(stderr, "\tlatency = %Ld, buffer duration = %Ld\n", mLatency, BufferDuration()); FPRINTF(stderr, "\tcreating group of %ld buffers, size = %lx\n", count, size); mBufferGroup = new BBufferGroup(size, count); }
/*!	Switches the producer to a different buffer group.

	If \a newGroup is NULL we build a replacement group ourselves, sized to
	cover the downstream latency plus one spare buffer.

	\return B_MEDIA_BAD_SOURCE if \a for_source is not our output, B_OK
		otherwise.
*/
status_t
ToneProducer::SetBufferGroup(const media_source& for_source,
	BBufferGroup* newGroup)
{
	FPRINTF(stderr, "ToneProducer::SetBufferGroup\n");

	// reject requests that are not aimed at our output
	if (for_source != mOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// already using this very group? nothing to do
	if (newGroup == mBufferGroup)
		return B_OK;

	// Replace the group we have been using. Deleting it blocks until every
	// outstanding buffer has been recycled, so any BBuffer we might still
	// be caching must have been Recycle()d beforehand — otherwise this
	// would deadlock waiting for it.
	delete mBufferGroup;

	if (newGroup == NULL) {
		// No replacement supplied: construct our own group, sized to span
		// the downstream latency plus one extra buffer.
		size_t size = mOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(mLatency / BufferDuration() + 1 + 1);
		mBufferGroup = new BBufferGroup(size, count);
	} else {
		// adopt the caller-provided group from now on
		mBufferGroup = newGroup;
	}

	return B_OK;
}
/*!	Switches the producer to a different buffer group, tracking ownership
	via fUsingOurBuffers so we only ever delete groups we created.

	\return B_MEDIA_BAD_SOURCE if \a forSource is not our output, B_OK
		otherwise.
*/
status_t
AudioProducer::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	TRACE("%p->AudioProducer::SetBufferGroup()\n", this);

	// only honor requests aimed at our own output
	if (forSource != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// same group we already use — nothing to do
	if (newGroup == fBufferGroup)
		return B_OK;

	// Dispose of the old group only if it is ours; deleting blocks until
	// all of its buffers have been recycled.
	if (fUsingOurBuffers && fBufferGroup)
		delete fBufferGroup;

	if (newGroup == NULL) {
		// No group handed to us: build our own, sized to cover the
		// downstream latency plus one spare buffer.
		size_t size = fOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(fLatency / BufferDuration() + 1 + 1);
		fBufferGroup = new BBufferGroup(size, count);
		fUsingOurBuffers = true;
	} else {
		// adopt the caller's group; it stays under the caller's ownership
		fBufferGroup = newGroup;
		fUsingOurBuffers = false;
	}

	return B_OK;
}
/*!	Produces the next outgoing audio buffer.

	Requests a buffer from our group (waiting at most half a buffer
	duration so a timeout cannot wedge the control thread), fills it with
	player data — or silence if the player has none — and stamps its
	header with \a eventTime.

	\return The filled buffer, or NULL if the request timed out.
*/
BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	size_t bufferSize = fOutput.format.u.raw_audio.buffer_size;

	// Bounded wait: if we fail to get a buffer (for example, if the
	// request times out), we skip this buffer and go on to the next, to
	// avoid locking up the control thread.
	BBuffer* buffer = fBufferGroup->RequestBuffer(bufferSize,
		BufferDuration() / 2);
	if (buffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// fill with player data if available, otherwise output silence
	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(buffer->Data(), bufferSize,
			fOutput.format.u.raw_audio);
	} else
		memset(buffer->Data(), 0, bufferSize);

	// describe the buffer
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = bufferSize;
	header->time_source = TimeSource()->ID();
	header->start_time = eventTime;

	return buffer;
}
void GameProducer::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* io_name) { // If something earlier failed, Connect() might still be called, but with a non-zero // error code. When that happens we simply unreserve the connection and do // nothing else. if (error) { fOutput.destination = media_destination::null; fOutput.format = fPreferredFormat; return; } // Okay, the connection has been confirmed. Record the destination and format // that we agreed on, and report our connection name again. fOutput.destination = destination; fOutput.format = format; strlcpy(io_name, fOutput.name, B_MEDIA_NAME_LENGTH); // Now that we're connected, we can determine our downstream latency. // Do so, then make sure we get our events early enough. media_node_id id; FindLatencyFor(fOutput.destination, &fLatency, &id); if (!fBufferGroup) fBufferSize = fOutput.format.u.raw_audio.buffer_size; // Have to set it before latency calculating // Use a dry run to see how long it takes me to fill a buffer of data // The first step to setup the buffer bigtime_t start, produceLatency; int32 frames = int32(fBufferSize / fFrameSize); float* data = new float[frames * 2]; // Second, fill the buffer start = ::system_time(); for (int32 i = 0; i < frames; i++) { data[i*2] = 0.8 * float(i/frames); data[i*2+1] = 0.8 * float(i/frames); } produceLatency = ::system_time(); // Third, calculate the latency fInternalLatency = produceLatency - start; SetEventLatency(fLatency + fInternalLatency); // Finaily, clean up delete [] data; // reset our buffer duration, etc. to avoid later calculations bigtime_t duration = bigtime_t(1000000) * frames / bigtime_t(fOutput.format.u.raw_audio.frame_rate); SetBufferDuration(duration); // Set up the buffer group for our connection, as long as nobody handed us a // buffer group (via SetBufferGroup()) prior to this. 
if (!fBufferGroup) { int32 count = int32(fLatency / BufferDuration() + 2); fBufferGroup = new BBufferGroup(fBufferSize, count); } }
// Filter one input buffer into one output buffer via the node's current
// operation object (m_op).  Both buffers and the operation must be valid;
// frame accounting is driven by the *input* buffer's frame count.
// NOTE(review): the "+++++" markers below are the original author's
// unfinished event-handling hooks — as written, nextEventTime is always
// endTime, so the loop processes the whole buffer in one pass.
void AudioFilterNode::processBuffer(
	BBuffer* inputBuffer,
	BBuffer* outputBuffer) {

	ASSERT(inputBuffer);
	ASSERT(outputBuffer);
	ASSERT(m_op);

	// create wrapper objects
	AudioBuffer input(m_input.format.u.raw_audio, inputBuffer);
	AudioBuffer output(m_output.format.u.raw_audio, outputBuffer);

	double sourceOffset = 0.0;
	uint32 destinationOffset = 0L;

	// when is the first frame due to be consumed?
	bigtime_t startTime = outputBuffer->Header()->start_time;
	// when is the next frame to be produced going to be consumed?
	bigtime_t targetTime = startTime;
	// when will the first frame of the next buffer be consumed?
	bigtime_t endTime = startTime + BufferDuration();

	uint32 framesRemaining = input.frames();
	while(framesRemaining) {

		// handle all events occurring before targetTime
		// +++++

		bigtime_t nextEventTime = endTime;

		// look for next event occurring before endTime
		// +++++

		// process up to found event, if any, or to end of buffer
		int64 toProcess = frames_for_duration(output.format(),
			nextEventTime - targetTime);
		ASSERT(toProcess > 0);

		uint32 processed = m_op->process(
			input, output, sourceOffset, destinationOffset,
			(uint32)toProcess, targetTime);
		if(processed < toProcess) {
			// +++++ in offline mode this will have to request additional
			// buffer(s), right?
			PRINT((
				"*** AudioFilterNode::processBuffer(): insufficient frames filled\n"));
		}

		// clamp rather than underflow the unsigned remaining-frame count
		if(toProcess > framesRemaining)
			framesRemaining = 0;
		else
			framesRemaining -= toProcess;

		// advance target time
		targetTime = nextEventTime; // +++++ might this drift from the real frame offset?
	}

	// NOTE(review): size_used is derived from the *input* frame count but
	// the *output* frame size — confirm this is intended when the two
	// formats differ.
	outputBuffer->Header()->size_used = input.frames()
		* bytes_per_frame(m_output.format.u.raw_audio);
//	PRINT(("### output size: %ld\n", outputBuffer->Header()->size_used));
}
void ESDSinkNode::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* io_name) { CALLED(); node_output *channel = FindOutput(source); // is this our output? if (channel == NULL) { fprintf(stderr, "ESDSinkNode::Connect returning (cause : B_MEDIA_BAD_SOURCE)\n"); return; } // If something earlier failed, Connect() might still be called, but with a non-zero // error code. When that happens we simply unreserve the connection and do // nothing else. if (error) { channel->fOutput.destination = media_destination::null; channel->fOutput.format = channel->fPreferredFormat; return; } // Okay, the connection has been confirmed. Record the destination and format // that we agreed on, and report our connection name again. channel->fOutput.destination = destination; channel->fOutput.format = format; strncpy(io_name, channel->fOutput.name, B_MEDIA_NAME_LENGTH); // reset our buffer duration, etc. to avoid later calculations bigtime_t duration = channel->fOutput.format.u.raw_audio.buffer_size * 10000 / ( (channel->fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK) * channel->fOutput.format.u.raw_audio.channel_count) / ((int32)(channel->fOutput.format.u.raw_audio.frame_rate / 100)); SetBufferDuration(duration); // Now that we're connected, we can determine our downstream latency. // Do so, then make sure we get our events early enough. media_node_id id; FindLatencyFor(channel->fOutput.destination, &fLatency, &id); PRINT(("\tdownstream latency = %Ld\n", fLatency)); fInternalLatency = BufferDuration(); PRINT(("\tbuffer-filling took %Ld usec on this machine\n", fInternalLatency)); //SetEventLatency(fLatency + fInternalLatency); // Set up the buffer group for our connection, as long as nobody handed us a // buffer group (via SetBufferGroup()) prior to this. That can happen, for example, // if the consumer calls SetOutputBuffersFor() on us from within its Connected() // method. 
if (!channel->fBufferGroup) AllocateBuffers(*channel); // we are sure the thread is started StartThread(); }
void SoundPlayNode::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* name) { CALLED(); // is this our output? if (source != fOutput.source) { TRACE("SoundPlayNode::Connect returning\n"); return; } // If something earlier failed, Connect() might still be called, but with // a non-zero error code. When that happens we simply unreserve the // connection and do nothing else. if (error) { fOutput.destination = media_destination::null; fOutput.format.type = B_MEDIA_RAW_AUDIO; fOutput.format.u.raw_audio = media_multi_audio_format::wildcard; return; } // Okay, the connection has been confirmed. Record the destination and // format that we agreed on, and report our connection name again. fOutput.destination = destination; fOutput.format = format; strcpy(name, Name()); // Now that we're connected, we can determine our downstream latency. // Do so, then make sure we get our events early enough. media_node_id id; FindLatencyFor(fOutput.destination, &fLatency, &id); TRACE("SoundPlayNode::Connect: downstream latency = %Ld\n", fLatency); // reset our buffer duration, etc. to avoid later calculations bigtime_t duration = ((fOutput.format.u.raw_audio.buffer_size * 1000000LL) / ((fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK) * fOutput.format.u.raw_audio.channel_count)) / (int32)fOutput.format.u.raw_audio.frame_rate; SetBufferDuration(duration); TRACE("SoundPlayNode::Connect: buffer duration is %Ld\n", duration); fInternalLatency = (3 * BufferDuration()) / 4; TRACE("SoundPlayNode::Connect: using %Ld as internal latency\n", fInternalLatency); SetEventLatency(fLatency + fInternalLatency); // Set up the buffer group for our connection, as long as nobody handed us // a buffer group (via SetBufferGroup()) prior to this. // That can happen, for example, if the consumer calls SetOutputBuffersFor() // on us from within its Connected() method. if (!fBufferGroup) AllocateBuffers(); }
/*!	Produces the next outgoing audio buffer.

	Requests a buffer (waiting at most one buffer duration so a timeout
	cannot wedge the control thread), zeroes it, lets the game sound object
	render into it, and stamps the header according to the run mode.

	\return The filled buffer, or NULL if the request timed out.
*/
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// If we fail to get a buffer (for example, if the request times out),
	// we skip this buffer and go on to the next, to avoid locking up the
	// control thread.
	BBuffer* buffer = fBufferGroup->RequestBuffer(fBufferSize,
		BufferDuration());
	if (!buffer)
		return NULL;

	int64 frameCount = int64(fBufferSize / fFrameSize);

	// Start from silence, then let the game sound fill the buffer,
	// continuing where the previous buffer left off.
	memset(buffer->Data(), 0, fBufferSize);
	fObject->Play(buffer->Data(), frameCount);

	// describe the buffer
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fBufferSize;
	header->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time. We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// Live performance modes: stamp with the time at which the buffer
		// should be rendered, not the capture time.  fStartTime caches the
		// first buffer's performance time; each stamp is an offset from it
		// based on the total media produced so far — recomputing from the
		// total every buffer avoids accumulating rounding error.
		stamp = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = stamp;

	return buffer;
}
/*!	(Re)allocates the producer's own BBufferGroup for \a format, sized to
	cover total latency plus two spare buffers.

	Bugs fixed:
	 - BufferDuration() returns a bigtime_t (int64); it was truncated into
	   an int32, which can overflow for large durations. The full width is
	   now kept.
	 - When the buffer duration was zero the count stayed 0, producing a
	   group with no buffers at all (every RequestBuffer() would fail). A
	   minimum of 3 buffers is now enforced, matching
	   SoundPlayNode::AllocateBuffers.

	\return The new group's InitCheck() result.
*/
status_t
AudioProducer::_AllocateBuffers(const media_format& format)
{
	TRACE("%p->AudioProducer::_AllocateBuffers()\n", this);

	// Dispose of a previous group only if we own it; deleting waits until
	// all of its buffers have been recycled.
	if (fBufferGroup && fUsingOurBuffers) {
		delete fBufferGroup;
		fBufferGroup = NULL;
	}

	size_t size = format.u.raw_audio.buffer_size;
	// keep the full 64-bit duration — no int32 truncation
	bigtime_t bufferDuration = BufferDuration();
	int32 count = 0;
	if (bufferDuration > 0) {
		count = (int32)((fLatency + fInternalLatency) / bufferDuration + 2);
	}
	// never create an empty or starved group
	if (count < 3)
		count = 3;

	fBufferGroup = new BBufferGroup(size, count);
	fUsingOurBuffers = true;
	return fBufferGroup->InitCheck();
}
/*!	Switches one of our outputs to a different buffer group.

	If \a newGroup is NULL we build a replacement ourselves, sized to
	cover the downstream latency plus one spare buffer.

	\return B_MEDIA_BAD_SOURCE if \a for_source is not one of our outputs,
		B_OK otherwise.
*/
status_t
ESDSinkNode::SetBufferGroup(const media_source& for_source,
	BBufferGroup* newGroup)
{
	CALLED();

	node_output* channel = FindOutput(for_source);

	// verify the source actually belongs to us
	if (channel == NULL) {
		fprintf(stderr, "ESDSinkNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// already using this very group? nothing to do
	if (newGroup == channel->fBufferGroup)
		return B_OK;

	// Replace the group this channel has been using. Deleting it blocks
	// until all of its buffers are recycled, so any BBuffer we might still
	// cache must have been Recycle()d first — otherwise we would deadlock
	// right here.
	delete channel->fBufferGroup;

	if (newGroup == NULL) {
		// No replacement supplied: build our own group, sized to span the
		// downstream latency plus one extra buffer.
		size_t size = channel->fOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(fLatency / BufferDuration() + 1 + 1);
		channel->fBufferGroup = new BBufferGroup(size, count);
	} else {
		// adopt the caller-provided group from now on
		channel->fBufferGroup = newGroup;
	}

	return B_OK;
}
/*!	Switches the producer to a different buffer group, also updating
	fBufferSize to match the new group's buffers.

	\return B_MEDIA_BAD_SOURCE for a foreign source, B_BAD_VALUE if the
		supplied group's buffer list cannot be read, B_OK otherwise.
*/
status_t
GameProducer::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	// bogus arguments? bail out before touching any state
	if (forSource != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// already using this group — nothing to do
	if (newGroup == fBufferGroup)
		return B_OK;

	// Replace the group we have been using. Deleting it blocks until every
	// outstanding buffer has been recycled, so a cached BBuffer must have
	// been Recycle()d first or this would deadlock.
	delete fBufferGroup;

	if (newGroup != NULL) {
		// Adopt the supplied group and learn the buffer length from its
		// first buffer.
		fBufferGroup = newGroup;
		BBuffer* buffers[1];
		if (newGroup->GetBufferList(1, buffers) != B_OK)
			return B_BAD_VALUE;
		fBufferSize = buffers[0]->SizeAvailable();
	} else {
		// No group supplied: build our own, sized to span the downstream
		// latency plus two buffers.
		fBufferSize = fOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(fLatency / BufferDuration() + 2);
		fBufferGroup = new BBufferGroup(fBufferSize, count);
	}

	return B_OK;
}
// Pull the next buffer from our group and fill it with frames from the
// audio supplier (silence on any supplier failure), stamping it according
// to the run mode.  Optionally mirrors the data to a debug media file and
// schedules a per-channel peak-level notification for float-format audio.
// Returns NULL if no buffer could be obtained within one buffer duration.
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
	// bounded wait so a timed-out request cannot stall the control thread
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration());

	if (!buffer) {
		ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
		return NULL;
	}

	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
		// number of sample in the buffer

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->time_source = TimeSource()->ID();
	buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);

	// Performance time of this buffer's first frame, recomputed from the
	// running frame count each time to avoid accumulating rounding error.
	bigtime_t performanceTime = bigtime_t(double(fFramesSent)
		* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));

	// fill in data from audio supplier
	int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
	bigtime_t startTime = performanceTime;
	bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
		* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);

	// on any supplier problem, output silence rather than stale data
	if (!fSupplier || fSupplier->InitCheck() != B_OK
		|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
			endTime) != B_OK) {
		ERROR("AudioProducer::_FillNextBuffer() - supplier error -> silence\n");
		memset(buffer->Data(), 0, buffer->SizeUsed());
	}

	// stamp buffer
	if (RunMode() == B_RECORDING) {
		// recording mode: stamp with the (simulated) capture time
		header->start_time = eventTime;
	} else {
		// live modes: stamp with the scheduled rendering time
		header->start_time = fStartTime + performanceTime;
	}

#if DEBUG_TO_FILE
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fOutput.format, &track)) {
		track->WriteFrames(buffer->Data(), frameCount);
	}
#endif // DEBUG_TO_FILE

	if (fPeakListener
		&& fOutput.format.u.raw_audio.format
			== media_raw_audio_format::B_AUDIO_FLOAT) {
		// TODO: extend the peak notifier for other sample formats
		int32 channels = fOutput.format.u.raw_audio.channel_count;
		// NOTE(review): variable-length arrays are a compiler extension,
		// not standard C++
		float max[channels];
		float min[channels];
		for (int32 i = 0; i < channels; i++) {
			max[i] = -1.0;
			min[i] = 1.0;
		}

		// scan the interleaved samples for per-channel extrema
		float* sample = (float*)buffer->Data();
		for (uint32 i = 0; i < frameCount; i++) {
			for (int32 k = 0; k < channels; k++) {
				if (*sample < min[k])
					min[k] = *sample;
				if (*sample > max[k])
					max[k] = *sample;
				sample++;
			}
		}

		// one "max" entry per channel: the larger absolute extremum
		BMessage message(MSG_PEAK_NOTIFICATION);
		for (int32 i = 0; i < channels; i++) {
			float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
			message.AddFloat("max", maxAbs);
		}

		// deliver the notification at the buffer's real (wall-clock) time
		bigtime_t realTime = TimeSource()->RealTimeFor(
			fStartTime + performanceTime, 0);
		MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
			fPeakListener, message);
		if (event != NULL)
			EventQueue::Default().AddEvent(event);
	}

	return buffer;
}
/*****************************************************************************
 * OSSThread: asynchronous thread used to DMA the data to the device
 *****************************************************************************
 * Repeatedly fetches the next decoded buffer (or synthesizes silence on
 * underrun) and write()s it to the OSS file descriptor.  In S/PDIF
 * passthrough mode the pacing is derived from buffer lengths because the
 * emu10k1 driver misreports the buffered duration.
 *
 * Bug fixed: the malloc() used for the silence frame was unchecked; on
 * allocation failure the following memset()/write() would dereference
 * NULL.  The iteration is now skipped instead.
 *****************************************************************************/
static void* OSSThread( vlc_object_t *p_this )
{
    aout_instance_t * p_aout = (aout_instance_t*)p_this;
    struct aout_sys_t * p_sys = p_aout->output.p_sys;
    mtime_t next_date = 0;

    int canc = vlc_savecancel ();
    while ( vlc_object_alive (p_aout) )
    {
        aout_buffer_t * p_buffer = NULL;
        int i_tmp, i_size;
        uint8_t * p_bytes;

        if ( p_aout->output.output.i_format != VLC_CODEC_SPDIFL )
        {
            mtime_t buffered = BufferDuration( p_aout );

            /* Next buffer will be played at mdate() + buffered */
            p_buffer = aout_OutputNextBuffer( p_aout, mdate() + buffered,
                                              false );

            if( p_buffer == NULL &&
                buffered > ( p_aout->output.p_sys->max_buffer_duration
                             / p_aout->output.p_sys->i_fragstotal ) )
            {
                /* If we have at least a fragment full, then we can wait a
                 * little and retry to get a new audio buffer instead of
                 * playing a blank sample */
                msleep( ( p_aout->output.p_sys->max_buffer_duration
                          / p_aout->output.p_sys->i_fragstotal / 2 ) );
                continue;
            }
        }
        else
        {
            /* emu10k1 driver does not report Buffer Duration correctly in
             * passthrough mode so we have to cheat */
            if( !next_date )
            {
                next_date = mdate();
            }
            else
            {
                mtime_t delay = next_date - mdate();
                if( delay > AOUT_PTS_TOLERANCE )
                {
                    msleep( delay / 2 );
                }
            }

            while( vlc_object_alive (p_aout) &&
                   ! ( p_buffer = aout_OutputNextBuffer( p_aout, next_date,
                                                         true ) ) )
            {
                msleep( VLC_HARD_MIN_SLEEP );
                next_date = mdate();
            }
        }

        if ( p_buffer != NULL )
        {
            p_bytes = p_buffer->p_buffer;
            i_size = p_buffer->i_buffer;
            /* This is theoretical ... we'll see next iteration whether
             * we're drifting */
            next_date += p_buffer->i_length;
        }
        else
        {
            /* underrun: synthesize one frame of silence */
            i_size = FRAME_SIZE / p_aout->output.output.i_frame_length
                      * p_aout->output.output.i_bytes_per_frame;
            p_bytes = malloc( i_size );
            if( p_bytes == NULL )
            {
                /* out of memory: skip this iteration instead of crashing
                 * in memset()/write() below */
                continue;
            }
            memset( p_bytes, 0, i_size );
            next_date = 0;
        }

        i_tmp = write( p_sys->i_fd, p_bytes, i_size );

        if( i_tmp < 0 )
        {
            msg_Err( p_aout, "write failed (%m)" );
        }

        if ( p_buffer != NULL )
        {
            aout_BufferFree( p_buffer );
        }
        else
        {
            free( p_bytes );
        }
    }

    vlc_restorecancel (canc);
    return NULL;
}
/*****************************************************************************
 * OSSThread: asynchronous thread used to DMA the data to the device
 *****************************************************************************
 * Infinite loop: fetch the next decoded packet (or synthesize silence on
 * underrun) and write() it to the OSS file descriptor.  Thread
 * cancellation is kept disabled while a packet or heap allocation is held;
 * the cleanup handler (OSSThreadCleanup) releases them if the thread is
 * cancelled inside the cancellable write() section.
 *****************************************************************************/
static void* OSSThread( void *obj )
{
    audio_output_t * p_aout = (audio_output_t*)obj;
    struct aout_sys_t * p_sys = p_aout->sys;
    mtime_t next_date = 0;

    for( ;; )
    {
        aout_buffer_t * p_buffer = NULL;
        /* disable cancellation before acquiring resources */
        int canc = vlc_savecancel ();

        if ( p_aout->format.i_format != VLC_CODEC_SPDIFL )
        {
            mtime_t buffered = BufferDuration( p_aout );

            /* Next buffer will be played at mdate() + buffered */
            p_buffer = aout_PacketNext( p_aout, mdate() + buffered );

            if( p_buffer == NULL &&
                buffered > ( p_aout->sys->max_buffer_duration
                             / p_aout->sys->i_fragstotal ) )
            {
                vlc_restorecancel (canc);
                /* If we have at least a fragment full, then we can wait a
                 * little and retry to get a new audio buffer instead of
                 * playing a blank sample */
                msleep( ( p_aout->sys->max_buffer_duration
                          / p_aout->sys->i_fragstotal / 2 ) );
                continue;
            }
        }
        else
        {
            vlc_restorecancel (canc);

            /* emu10k1 driver does not report Buffer Duration correctly in
             * passthrough mode so we have to cheat */
            if( !next_date )
            {
                next_date = mdate();
            }
            else
            {
                mtime_t delay = next_date - mdate();
                if( delay > AOUT_MAX_PTS_ADVANCE )
                {
                    msleep( delay / 2 );
                }
            }

            /* Poll for a packet with short sleeps; cancellation is
             * re-disabled before each poll so a held packet can never be
             * leaked by cancellation. */
            for( ;; )
            {
                canc = vlc_savecancel ();
                p_buffer = aout_PacketNext( p_aout, next_date );
                if ( p_buffer )
                    break;
                vlc_restorecancel (canc);
                msleep( VLC_HARD_MIN_SLEEP );
                next_date = mdate();
            }
        }

        uint8_t * p_bytes;
        int i_size;
        if ( p_buffer != NULL )
        {
            p_bytes = p_buffer->p_buffer;
            i_size = p_buffer->i_buffer;
            /* This is theoretical ... we'll see next iteration whether
             * we're drifting */
            next_date += p_buffer->i_length;
        }
        else
        {
            /* Underrun: synthesize one frame of silence.
             * NOTE(review): the malloc() result is unchecked — memset()
             * would crash on allocation failure; confirm and fix. */
            i_size = FRAME_SIZE / p_aout->format.i_frame_length
                      * p_aout->format.i_bytes_per_frame;
            p_bytes = malloc( i_size );
            memset( p_bytes, 0, i_size );
            next_date = 0;
        }

        /* The cleanup handler frees the buffer/allocation if we are
         * cancelled inside the cancellable write() below. */
        oss_thread_ctx_t ctx = {
            .p_buffer = p_buffer,
            .p_bytes = p_bytes,
        };
        vlc_cleanup_push( OSSThreadCleanup, &ctx );
        vlc_restorecancel( canc );

        int i_tmp = write( p_sys->i_fd, p_bytes, i_size );

        if( i_tmp < 0 )
        {
            msg_Err( p_aout, "write failed (%m)" );
        }
        /* pops the handler and runs it, releasing the buffer/allocation */
        vlc_cleanup_run();
    }
    return NULL;
}
// create or discard buffer group if necessary void AudioFilterNode::updateBufferGroup() { status_t err; size_t inputSize = bytes_per_frame(m_input.format.u.raw_audio); size_t outputSize = bytes_per_frame(m_output.format.u.raw_audio); if(m_input.source == media_source::null || m_output.destination == media_destination::null || inputSize >= outputSize) { PRINT(("###### NO BUFFER GROUP NEEDED\n")); // no internal buffer group needed if(m_bufferGroup) { // does this block? +++++ delete m_bufferGroup; m_bufferGroup = 0; } return; } int32 bufferCount = EventLatency() / BufferDuration() + 1 + 1; // +++++ // [e.moon 27sep99] this is a reasonable number of buffers, // but it fails with looped file-player node in BeOS 4.5.2. // if(bufferCount < 5) bufferCount = 5; // if(bufferCount < 3) // bufferCount = 3; if(m_bufferGroup) { // is the current group sufficient? int32 curBufferCount; err = m_bufferGroup->CountBuffers(&curBufferCount); if(err == B_OK && curBufferCount >= bufferCount) { BBuffer* buf = m_bufferGroup->RequestBuffer( outputSize, -1); if(buf) { // yup buf->Recycle(); return; } } // nope, delete it to make way for the new one delete m_bufferGroup; m_bufferGroup = 0; } // create buffer group PRINT(( "##### AudioFilterNode::updateBufferGroup():\n" "##### creating %ld buffers of size %ld\n", bufferCount, m_output.format.u.raw_audio.buffer_size)); m_bufferGroup = new BBufferGroup( m_output.format.u.raw_audio.buffer_size, bufferCount); }
// how should we handle late buffers? drop them?
// notify the producer?
// Handler for SEND_NEW_BUFFER_EVENT: produce one buffer, send it
// downstream (when the output is enabled and we are started/connected),
// update the running frame count, and queue the event for the next buffer.
status_t
SoundPlayNode::SendNewBuffer(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if (RunState() != BMediaEventLooper::B_STARTED
		|| fOutput.destination == media_destination::null)
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing
	// here should arrive at it's destination. The MediaEventLooper should have
	// scheduled us early enough (based on EventLatency() and the
	// SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!

	// warn (but still deliver) when more than a third of a buffer late
	if (lateness > (BufferDuration() / 3) ) {
		printf("SoundPlayNode::SendNewBuffer, event scheduled much too late, "
			"lateness is %Ld\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (fOutputEnabled) {

		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {

			// If we are ready way too early, decrase internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now() - fLatency - fInternalLatency;
			if (how_early > 5000) {

				printf("SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);

				if (fTooEarlyCount++ == 5) {
					fInternalLatency -= how_early;
					if (fInternalLatency < 500)
						fInternalLatency = 500;
					printf("SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", fInternalLatency);
					SetEventLatency(fLatency + fInternalLatency);
					fTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (SendBuffer(buffer, fOutput.source, fOutput.destination)
					!= B_OK) {
				// we need to recycle the buffer
				// if the call to SendBuffer() fails
				printf("SoundPlayNode::SendNewBuffer: Buffer sending "
					"failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	// (frames per buffer = buffer size / bytes per frame; bytes per frame
	// = sample size * channel count)
	size_t nFrames = fOutput.format.u.raw_audio.buffer_size
		/ ((fOutput.format.u.raw_audio.format
			& media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* fOutput.format.u.raw_audio.channel_count);
	fFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go
	// nextEvent is the time at which the buffer should arrive at it's
	// destination
	bigtime_t nextEvent = fStartTime + bigtime_t((1000000LL * fFramesSent)
		/ (int32)fOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}
/*!	Produces the next outgoing audio buffer.

	Requests a buffer (waiting at most one buffer duration so a timed-out
	request cannot wedge the control thread), renders the currently
	selected waveform into it, and stamps the header according to the run
	mode.

	\return The filled buffer, or NULL if the request timed out.
*/
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// if we fail to get a buffer we skip this one and go on to the next
	BBuffer* buffer = mBufferGroup->RequestBuffer(
		mOutput.format.u.raw_audio.buffer_size, BufferDuration());
	if (!buffer)
		return NULL;

	// Fill with waveform data, continuing where the last buffer left off.
	// 20sep99: multichannel support
	size_t numFrames = mOutput.format.u.raw_audio.buffer_size
		/ (sizeof(float) * mOutput.format.u.raw_audio.channel_count);
	bool stereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if (!stereo) {
		// only mono and stereo are supported
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	}
//	PRINT(("buffer: %ld, %ld frames, %s\n",
//		mOutput.format.u.raw_audio.buffer_size, numFrames,
//		stereo ? "stereo" : "mono"));

	float* data = (float*)buffer->Data();
	switch (mWaveform) {
		case SINE_WAVE:
			FillSineBuffer(data, numFrames, stereo);
			break;

		case TRIANGLE_WAVE:
			FillTriangleBuffer(data, numFrames, stereo);
			break;

		case SAWTOOTH_WAVE:
			FillSawtoothBuffer(data, numFrames, stereo);
			break;
	}

	// describe the buffer
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = mOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// B_RECORDING mode: stamp with the capture time. We're not really
		// a hardware capture node, but we simulate one by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// Live performance modes: stamp with the time at which the buffer
		// should be rendered to the output, not the capture time.
		// mStartTime caches the first buffer's performance time; each
		// stamp is an offset from it based on the amount of media created
		// so far — recomputing from the total every buffer avoids
		// accumulating rounding error.
		stamp = mStartTime + bigtime_t(double(mFramesSent)
			/ double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = stamp;

	return buffer;
}