BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	// Amount of raw audio one buffer carries on this connection.
	const size_t byteCount = fOutput.format.u.raw_audio.buffer_size;

	// Ask our buffer group for a buffer, waiting at most half a buffer
	// duration. If the request times out we skip this buffer and move on
	// to the next one rather than stall the control thread.
	BBuffer* outBuffer = fBufferGroup->RequestBuffer(byteCount,
		BufferDuration() / 2);
	if (outBuffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// Let the player render into the buffer; emit silence when it has
	// nothing queued.
	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(outBuffer->Data(), byteCount,
			fOutput.format.u.raw_audio);
	} else
		memset(outBuffer->Data(), 0, byteCount);

	// Describe the payload for the downstream consumer.
	media_header* outHeader = outBuffer->Header();
	outHeader->type = B_MEDIA_RAW_AUDIO;
	outHeader->size_used = byteCount;
	outHeader->time_source = TimeSource()->ID();
	outHeader->start_time = eventTime;

	return outBuffer;
}
// Produces one outgoing audio buffer for the given performance/capture time.
// Returns NULL when no buffer could be acquired in time (the frame is then
// simply skipped).
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = fBufferGroup->RequestBuffer(fBufferSize, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread.
	if (!buf)
		return NULL;

	// we need to describe the buffer: whole frames only, pre-cleared so a
	// partial fill leaves silence rather than stale data
	int64 frames = int64(fBufferSize / fFrameSize);
	memset(buf->Data(), 0, fBufferSize);
	// now fill the buffer with data, continuing where the last buffer left off
	fObject->Play(buf->Data(), frames);

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = fBufferSize;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time. We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// okay, we're in one of the "live" performance run modes. in these
		// modes, we stamp the buffer with the time at which the buffer should
		// be rendered to the output, not with the capture time. fStartTime is
		// the cached value of the first buffer's performance time; we
		// calculate this buffer's performance time as an offset from that
		// time, based on the amount of media we've created so far.
		// Recalculating every buffer like this avoids accumulation of error.
		stamp = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
// Reader loop: pulls raw DV data chunks from the FireWire card, carves them
// into BBuffers and sends each one downstream. Runs until fTerminateThreads
// is set.
void
FireWireDVNode::card_reader_thread()
{
	status_t err;
	size_t rbufsize;
	int rcount;

	// Size the buffer group after the card's own buffer geometry.
	fCard->GetBufInfo(&rbufsize, &rcount);
	delete fBufferGroupEncVideo;
	fBufferGroupEncVideo = new BBufferGroup(rbufsize, rcount);
	while (!fTerminateThreads) {
		void *data, *end;
		// Read() hands back a pointer to the next chunk of card data; a
		// negative result is an error code and we just retry.
		ssize_t sizeUsed = fCard->Read(&data);
		if (sizeUsed < 0) {
			TRACE("FireWireDVNode::%s: %s\n", __FUNCTION__,
				strerror(sizeUsed));
			continue;
		}
		end = (char*)data + sizeUsed;
		// Consume the chunk frame by frame until 'data' reaches 'end'.
		while (data < end) {
			// Wait up to 10ms for a free buffer; on timeout we retry from
			// the same position in the chunk.
			BBuffer* buf = fBufferGroupEncVideo->RequestBuffer(rbufsize,
				10000);
			if (!buf) {
				TRACE("OutVideo: request buffer timout\n");
				continue;
			}
			// Extract() fills the buffer from the chunk -- presumably
			// advancing 'data'/'sizeUsed' as it goes (TODO confirm against
			// the card driver API).
			err = fCard->Extract(buf->Data(), &data, &sizeUsed);
			if (err) {
				buf->Recycle();
				printf("OutVideo Extract error %s\n", strerror(err));
				continue;
			}
			media_header* hdr = buf->Header();
			hdr->type = B_MEDIA_ENCODED_VIDEO;
			hdr->size_used = sizeUsed;
			hdr->time_source = TimeSource()->ID(); // set time source id
			// what should the start_time be?
			hdr->start_time = TimeSource()->PerformanceTimeFor(system_time());

			// SendBuffer() is serialized with the lock; on failure the
			// buffer is returned to the group ourselves.
			fLock.Lock();
			if (SendBuffer(buf, fOutputEncVideo.source,
					fOutputEncVideo.destination) != B_OK) {
				TRACE("OutVideo: sending buffer failed\n");
				buf->Recycle();
			}
			fLock.Unlock();
		}
	}
}
// how should we handle late buffers? drop them? // notify the producer? status_t ESDSinkNode::HandleBuffer( const media_timed_event *event, bigtime_t lateness, bool realTimeEvent) { CALLED(); BBuffer * buffer = const_cast<BBuffer*>((BBuffer*)event->pointer); if (buffer == 0) { fprintf(stderr,"<- B_BAD_VALUE\n"); return B_BAD_VALUE; } if(fInput.destination.id != buffer->Header()->destination) { fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n"); return B_MEDIA_BAD_DESTINATION; } media_header* hdr = buffer->Header(); bigtime_t now = TimeSource()->Now(); bigtime_t perf_time = hdr->start_time; // the how_early calculate here doesn't include scheduling latency because // we've already been scheduled to handle the buffer bigtime_t how_early = perf_time - EventLatency() - now; // if the buffer is late, we ignore it and report the fact to the producer // who sent it to us if ((RunMode() != B_OFFLINE) && // lateness doesn't matter in offline mode... (RunMode() != B_RECORDING) && // ...or in recording mode (how_early < 0LL)) { //mLateBuffers++; NotifyLateProducer(fInput.source, -how_early, perf_time); fprintf(stderr," <- LATE BUFFER : %lli\n", how_early); buffer->Recycle(); } else { if (fDevice->CanSend()) fDevice->Write(buffer->Data(), buffer->SizeUsed()); } return B_OK; }
status_t
ClientNode::_InitOutputPorts()
{
	// Attach a processing buffer to every output port of our owner.
	// Fails with B_ERROR as soon as a port turns out to be unconnected
	// or the buffer group cannot satisfy a request.
	JackPortList* ports = fOwner->GetOutputPorts();
	for (int index = 0; index < ports->CountItems(); index++) {
		JackPort* currentPort = ports->ItemAt(index);
		if (!currentPort->IsConnected())
			return B_ERROR;

		BBuffer* processingBuffer = fBufferGroup->RequestBuffer(
			fFormat.u.raw_audio.buffer_size);
		bool requestFailed = processingBuffer == NULL
			|| processingBuffer->Data() == NULL;
		if (requestFailed) {
			printf("RequestBuffer failed\n");
			return B_ERROR;
		}

		currentPort->SetProcessingBuffer(processingBuffer);
	}
	return B_OK;
}
// _FrameGenerator
//
// Frame generator loop: once per frame, waits (on fFrameSync, with an
// absolute timeout) until shortly before the frame's presentation deadline,
// then fetches, fills and sends one video buffer downstream. The loop exits
// when the node manager cannot be locked fatally, or when the semaphore is
// deleted (see _HandleStop()).
int32
VideoProducer::_FrameGenerator()
{
	bool forceSendingBuffer = true;
	bigtime_t lastFrameSentAt = 0;
	bool running = true;
	while (running) {
		ldebug("VideoProducer: loop: %Ld\n", fFrame);
		// lock the node manager
		status_t err = fManager->LockWithTimeout(10000);
		bool ignoreEvent = false;
		// Data to be retrieved from the node manager.
		bigtime_t performanceTime = 0;
		bigtime_t nextPerformanceTime = 0;
		bigtime_t waitUntil = 0;
		bigtime_t nextWaitUntil = 0;
		bigtime_t maxRenderTime = 0;
		int32 playingDirection = 0;
		int64 playlistFrame = 0;
		switch (err) {
			case B_OK: {
				ldebug("VideoProducer: node manager successfully locked\n");
				// get the times for the current and the next frame
				performanceTime = fManager->TimeForFrame(fFrame);
				nextPerformanceTime = fManager->TimeForFrame(fFrame + 1);
				// Render-time budget: at least the supplier's processing
				// latency, capped at 90% of one ~30fps frame period.
				maxRenderTime = min_c(bigtime_t(33334 * 0.9),
					max_c(fSupplier->ProcessingLatency(), maxRenderTime));
				// Wake up maxRenderTime before each frame's real-time
				// deadline.
				waitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ performanceTime, 0) - maxRenderTime;
				nextWaitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ nextPerformanceTime, 0) - maxRenderTime;
				// get playing direction and playlist frame for the current
				// frame
				bool newPlayingState;
				playlistFrame = fManager->PlaylistFrameAtFrame(fFrame,
					playingDirection, newPlayingState);
				ldebug("VideoProducer: performance time: %Ld, playlist frame: %Ld\n",
					performanceTime, playlistFrame);
				// A fresh playing state forces one buffer out even when
				// paused, so the display shows the current frame.
				forceSendingBuffer |= newPlayingState;
				fManager->SetCurrentVideoTime(nextPerformanceTime);
				fManager->Unlock();
				break;
			}
			case B_TIMED_OUT:
				ldebug("VideoProducer: Couldn't lock the node manager.\n");
				// Wake immediately (timeout in the past) but skip the event.
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				break;
			default:
				printf("Couldn't lock the node manager. Terminating video producer "
					"frame generator thread.\n");
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				running = false;
				break;
		}
		// Force sending a frame, if the last one has been sent more than
		// one second ago.
		if (lastFrameSentAt + 1000000 < performanceTime)
			forceSendingBuffer = true;

		ldebug("VideoProducer: waiting (%Ld)...\n", waitUntil);
		// wait until...
		err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT, waitUntil);
		// The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		// else means the thread should quit. Deleting the semaphore, as in
		// VideoProducer::_HandleStop(), will trigger this behavior.
		switch (err) {
			case B_OK:
				// Semaphore was signalled: somebody wants us to re-evaluate,
				// not to produce a frame.
				ldebug("VideoProducer::_FrameGenerator - going back to sleep.\n");
				break;
			case B_TIMED_OUT:
				ldebug("VideoProducer: timed out => event\n");
				// Catch the cases in which the node manager could not be
				// locked and we therefore have no valid data to work with,
				// or the producer is not running or enabled.
				if (ignoreEvent || !fRunning || !fEnabled) {
					ldebug("VideoProducer: ignore event\n");
					// nothing to do
				// Drop frame if it's at least a frame late.
				} else if (nextWaitUntil < system_time()) {
					//printf("VideoProducer: dropped frame (%ld)\n", fFrame);
					if (fManager->LockWithTimeout(10000) == B_OK) {
						fManager->FrameDropped();
						fManager->Unlock();
					}
					// next frame
					fFrame++;
				// Send buffers only, if playing, the node is running and the
				// output has been enabled
				} else if (playingDirection != 0 || forceSendingBuffer) {
					ldebug("VideoProducer: produce frame\n");
					BAutolock _(fLock);
					// Fetch a buffer from the buffer group
					BBuffer *buffer = fUsedBufferGroup->RequestBuffer(
						fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count, 0LL);
					if (buffer) {
						// Fill out the details about this buffer.
						media_header *h = buffer->Header();
						h->type = B_MEDIA_RAW_VIDEO;
						h->time_source = TimeSource()->ID();
						h->size_used = fConnectedFormat.display.bytes_per_row
							* fConnectedFormat.display.line_count;
						// For a buffer originating from a device, you might
						// want to calculate this based on the
						// PerformanceTimeFor the time your buffer arrived at
						// the hardware (plus any applicable adjustments).
						h->start_time = fPerformanceTimeBase + performanceTime;
						h->file_pos = 0;
						h->orig_size = 0;
						h->data_offset = 0;
						h->u.raw_video.field_gamma = 1.0;
						h->u.raw_video.field_sequence = fFrame;
						h->u.raw_video.field_number = 0;
						h->u.raw_video.pulldown_number = 0;
						h->u.raw_video.first_active_line = 1;
						h->u.raw_video.line_count
							= fConnectedFormat.display.line_count;
						// Fill in a frame
						media_format mf;
						mf.type = B_MEDIA_RAW_VIDEO;
						mf.u.raw_video = fConnectedFormat;
						ldebug("VideoProducer: frame: %Ld, playlistFrame: %Ld\n",
							fFrame, playlistFrame);
						// In: force flag; out: whether the supplier served
						// the frame from its cache (then we don't resend it).
						bool forceOrWasCached = forceSendingBuffer;
//						if (fManager->LockWithTimeout(5000) == B_OK) {
							// we need to lock the manager, or our
							// fSupplier might work on bad data
							err = fSupplier->FillBuffer(playlistFrame,
								buffer->Data(), &mf, forceOrWasCached);
//							fManager->Unlock();
//						} else {
//							err = B_ERROR;
//						}
						// clean the buffer if something went wrong
						if (err != B_OK) {
							memset(buffer->Data(), 0, h->size_used);
							err = B_OK;
						}
						// Send the buffer on down to the consumer
						if (!forceOrWasCached) {
							if (SendBuffer(buffer, fOutput.source,
									fOutput.destination) != B_OK) {
								printf("_FrameGenerator: Error sending buffer\n");
								// If there is a problem sending the buffer,
								// or if we don't send the buffer because its
								// contents are the same as the last one,
								// return it to its buffer group.
								buffer->Recycle();
								// we tell the supplier to delete
								// its caches if there was a problem sending
								// the buffer
								fSupplier->DeleteCaches();
							}
						} else
							buffer->Recycle();
						// Only if everything went fine we clear the flag
						// that forces us to send a buffer even if not
						// playing.
						if (err == B_OK) {
							forceSendingBuffer = false;
							lastFrameSentAt = performanceTime;
						}
					} else
						ldebug("no buffer!\n");
					// next frame
					fFrame++;
				} else {
					ldebug("VideoProducer: not playing\n");
					// next frame
					fFrame++;
				}
				break;
			default:
				ldebug("Couldn't acquire semaphore. Error: %s\n",
					strerror(err));
				running = false;
				break;
		}
	}
	ldebug("VideoProducer: frame generator thread done.\n");
	return B_OK;
}
// Consumer display loop: polls the time source, honors pending Start/Stop/
// Seek requests, and pops queued buffers, blitting each frame into m_Bitmap
// and drawing it when its start time falls within half a frame period of
// "now". Runs until mDisplayQuit is set.
void
TVideoPreviewView::DisplayThread()
{
	FUNCTION("TVideoPreviewView::DisplayThread\n");

	bigtime_t timeout = 5000;
	bigtime_t realTimeNow = 0;
	bigtime_t perfTimeNow = 0;
	// Half of one 29.97fps (NTSC) frame period, in microseconds.
	bigtime_t halfPeriod = (bigtime_t) (500000./29.97);
	bool timeSourceRunning = false;

	while (!mDisplayQuit) {
		// Sample the time source state under the service lock.
		if (acquire_sem(mServiceLock) == B_NO_ERROR) {
			timeSourceRunning = TimeSource()->IsRunning();
			realTimeNow = BTimeSource::RealTime();
			perfTimeNow = TimeSource()->Now();
			release_sem(mServiceLock);
		}

		snooze(timeout);

		if (timeSourceRunning) {
			// if we received a Stop, deal with it
			if (mStopping) {
				PROGRESS("VidConsumer::DisplayThread - STOP\n");
				if (perfTimeNow >= mStopTime) {
					mRunning = false;
					mStopping = false;
					// deal with any pending Seek
					if (mSeeking)
						mSeeking = false;
					//if (mConnected)
					//	SendDataStatus(B_DATA_NOT_AVAILABLE, mConnections[0], mStopTime);
					continue;
				}
			}

			// if we received a Seek, deal with it
			if (mSeeking) {
				PROGRESS("VidConsumer::DisplayThread - SEEK\n");
				if (perfTimeNow >= mSeekTime) {
					PROGRESS("VidConsumer::DisplayThread - DO SEEK\n");
					mSeeking = false;
					mDeltaTime = mMediaTime;
					continue;
				}
			}

			// if we received a Start, deal with it
			if (mStarting) {
				PROGRESS("BBt848Controllable::CaptureRun mStartTime = %.4f TimeNow = %.4f\n",
					(double)mStartTime/M1, (double)perfTimeNow/M1);
				if (perfTimeNow >= mStartTime) {
					mRunning = true;
					mStarting = false;
					mDeltaTime = mStartTime;
					//if (mConnected)
					//	SendDataStatus(B_DATA_AVAILABLE, mConnections[0], mStartTime);
					continue;
				}
			}

			if (mRunning) {
				// check for buffer available.
				status_t err = acquire_sem_etc(mBufferAvailable, 1,
					B_TIMEOUT, halfPeriod * 2);
				if (err == B_TIMED_OUT || !mConnected) {
					ERROR("VidConsumer::DisplayThread - Error from acquire_sem_etc: 0x%lx\n", err);
					continue;
				}

				// NOTE(review): the popped buffer is dereferenced below
				// without a NULL check -- presumably the semaphore count
				// guarantees a queued buffer; confirm against the queue
				// implementation.
				BBuffer* buffer = mBufferQueue->PopFirstBuffer(0);

				LOOP("Popped buffer %08x, Start time: %.4f, system time: %.4f diff: %.4f\n",
					buffer, (double) buffer->Header()->start_time/M1,
					(double) perfTimeNow/M1,
					(double) (buffer->Header()->start_time - perfTimeNow)/M1);

				// Display frame if we're in B_OFFLINE mode or
				// within +/- a half frame time of start time
				if ( (mRunMode == B_OFFLINE) ||
						((perfTimeNow > (buffer->Header()->start_time - halfPeriod)) &&
						(perfTimeNow < (buffer->Header()->start_time + halfPeriod))) ) {
					// Copy the payload into the bitmap, then recycle the
					// buffer before the (potentially slow) draw.
					uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
					memcpy(m_Bitmap->Bits(), buffer->Data(),
						mRowBytes * mYSize * bpp);
					buffer->Header()->start_time = system_time();
					buffer->Recycle();
					bigtime_t t1 = system_time();

					// Update view
					if (LockLooper()) {
						DrawBitmap(m_Bitmap, Bounds());
						UnlockLooper();
					}

					// Warn if drawing took longer than ~30ms.
					t1 = system_time() - t1;
					if (t1/M1 > .030)
						printf("Draw time = %.4f\n",t1/M1);
					continue;
				} else {
					// If we're too early, push frame back on stack
					if (perfTimeNow < buffer->Header()->start_time) {
						LOOP("push buffer back on stack!\n");
						mBufferQueue->PushBuffer(buffer,
							buffer->Header()->start_time);
						// restore the semaphore count for the re-queued buffer
						release_sem(mBufferAvailable);
						continue;
					} else {
						// if we've already passed a half frame time past the buffer start time
						// and RunMode = INCREASE_LATENCY, increase latency and display the frame
						if ( (perfTimeNow > buffer->Header()->start_time) &&
								(mRunMode == B_INCREASE_LATENCY)) {
							mMyLatency += halfPeriod;
							ERROR("VidConsumer::DisplayThread - Increased latency to: %.4f\n", mMyLatency);
							ERROR(" Performance time: %.4f @ %.4f\n",
								(double)buffer->Header()->start_time/M1,
								(double)perfTimeNow/M1);
							uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
							memcpy(m_Bitmap->Bits(), buffer->Data(),
								mRowBytes * mYSize * bpp);
							buffer->Recycle();
							// should send late notice
							if (LockLooper()) {
								DrawBitmap(m_Bitmap, Bounds());
								UnlockLooper();
							}
							continue;
						} else {
							// we're more than a half frame time past the buffer start time
							// drop the frame
							ERROR("VidConsumer::DisplayThread - dropped late frame: %.4f @ %.4f\n",
								(double)buffer->Header()->start_time/M1,
								(double)perfTimeNow/M1);
							buffer->Recycle();
							// should send late notice
							continue;
						}
					}
				}
			}
			snooze(timeout);
		} else
			snooze(timeout); // if TimeSource stopped
	} // while (!mTimeToQuit)
}
// Produces one audio buffer for the given event time: fetches a buffer from
// the group, fills it from the audio supplier (silence on failure), stamps
// its header, and optionally writes a debug file and posts a peak
// notification. Returns NULL when no buffer could be acquired.
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration());
	if (!buffer) {
		ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
		return NULL;
	}

	// Bytes per sample are encoded in the low bits of the format constant.
	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
		// number of sample in the buffer

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->time_source = TimeSource()->ID();
	buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);

	// Performance time of this buffer, recomputed from the running frame
	// count so rounding errors do not accumulate.
	bigtime_t performanceTime = bigtime_t(double(fFramesSent)
		* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));

	// fill in data from audio supplier
	int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
	bigtime_t startTime = performanceTime;
	bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
		* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);

	// Any supplier failure degrades to silence rather than stale data.
	if (!fSupplier || fSupplier->InitCheck() != B_OK
		|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
			endTime) != B_OK) {
		ERROR("AudioProducer::_FillNextBuffer() - supplier error -> silence\n");
		memset(buffer->Data(), 0, buffer->SizeUsed());
	}

	// stamp buffer: capture time in recording mode, performance time
	// (relative to the cached fStartTime) in live modes
	if (RunMode() == B_RECORDING) {
		header->start_time = eventTime;
	} else {
		header->start_time = fStartTime + performanceTime;
	}

#if DEBUG_TO_FILE
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fOutput.format, &track)) {
		track->WriteFrames(buffer->Data(), frameCount);
	}
#endif // DEBUG_TO_FILE

	if (fPeakListener
		&& fOutput.format.u.raw_audio.format
			== media_raw_audio_format::B_AUDIO_FLOAT) {
		// TODO: extend the peak notifier for other sample formats
		int32 channels = fOutput.format.u.raw_audio.channel_count;
		float max[channels];
		float min[channels];
		for (int32 i = 0; i < channels; i++) {
			max[i] = -1.0;
			min[i] = 1.0;
		}

		// Scan the interleaved samples for per-channel min/max.
		float* sample = (float*)buffer->Data();
		for (uint32 i = 0; i < frameCount; i++) {
			for (int32 k = 0; k < channels; k++) {
				if (*sample < min[k])
					min[k] = *sample;
				if (*sample > max[k])
					max[k] = *sample;
				sample++;
			}
		}

		// One "max" float per channel: the larger absolute excursion.
		BMessage message(MSG_PEAK_NOTIFICATION);
		for (int32 i = 0; i < channels; i++) {
			float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
			message.AddFloat("max", maxAbs);
		}

		// Deliver the notification at the buffer's real playback time.
		bigtime_t realTime = TimeSource()->RealTimeFor(
			fStartTime + performanceTime, 0);
		MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
			fPeakListener, message);
		if (event != NULL)
			EventQueue::Default().AddEvent(event);
	}

	return buffer;
}
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// Grab a buffer from the group, waiting at most one buffer duration.
	// A timeout means this buffer is skipped, which keeps the control
	// thread from blocking indefinitely.
	BBuffer* buffer = mBufferGroup->RequestBuffer(
		mOutput.format.u.raw_audio.buffer_size, BufferDuration());
	if (buffer == NULL)
		return NULL;

	// Only mono and stereo float layouts are supported; compute how many
	// frames (one float per channel) fit into the buffer.
	bool stereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if (!stereo) {
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	}
	size_t numFrames = mOutput.format.u.raw_audio.buffer_size
		/ (sizeof(float) * mOutput.format.u.raw_audio.channel_count);

	// Render the configured waveform, continuing the phase where the
	// previous buffer left off.
	float* samples = (float*) buffer->Data();
	switch (mWaveform) {
		case SINE_WAVE:
			FillSineBuffer(samples, numFrames, stereo);
			break;
		case TRIANGLE_WAVE:
			FillTriangleBuffer(samples, numFrames, stereo);
			break;
		case SAWTOOTH_WAVE:
			FillSawtoothBuffer(samples, numFrames, stereo);
			break;
	}

	// Describe the payload to the consumer.
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = mOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// Recording mode: stamp with the (simulated) capture time, i.e.
		// the precalculated time at which this buffer "should" have been
		// created -- we aren't a real hardware capture node.
		stamp = event_time;
	} else {
		// Live modes: stamp with the performance time at which the buffer
		// should be rendered. mStartTime caches the first buffer's
		// performance time; deriving each stamp from the total number of
		// frames sent avoids accumulating rounding error.
		stamp = mStartTime + bigtime_t(double(mFramesSent)
			/ double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = stamp;

	return buffer;
}
// Frame generator loop: once per frame, waits (on fFrameSync, absolute
// timeout) until shortly before the frame's deadline, then fetches, fills
// and sends one video buffer downstream. Tracks dropped frames (bounded by
// kMaxDroppedFrames) and exits when the node manager lock fails fatally or
// the semaphore is deleted (see _HandleStop()).
int32
VideoProducer::_FrameGeneratorThread()
{
	bool forceSendingBuffer = true;
	int32 droppedFrames = 0;
	const int32 kMaxDroppedFrames = 15;
	bool running = true;
	while (running) {
		TRACE("_FrameGeneratorThread: loop: %Ld\n", fFrame);
		// lock the node manager
		status_t err = fManager->LockWithTimeout(10000);
		bool ignoreEvent = false;
		// Data to be retrieved from the node manager.
		bigtime_t performanceTime = 0;
		bigtime_t nextPerformanceTime = 0;
		bigtime_t waitUntil = 0;
		bigtime_t nextWaitUntil = 0;
		int32 playingDirection = 0;
		int32 playingMode = 0;
		int64 playlistFrame = 0;
		switch (err) {
			case B_OK: {
				TRACE("_FrameGeneratorThread: node manager successfully "
					"locked\n");
				// Report any drops accumulated since the last successful lock.
				if (droppedFrames > 0)
					fManager->FrameDropped();
				// get the times for the current and the next frame
				performanceTime = fManager->TimeForFrame(fFrame);
				nextPerformanceTime = fManager->TimeForFrame(fFrame + 1);
				playingMode = fManager->PlayModeAtFrame(fFrame);
				// Wake up fBufferLatency ahead of each frame's deadline.
				waitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ performanceTime, fBufferLatency);
				nextWaitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ nextPerformanceTime, fBufferLatency);
				// get playing direction and playlist frame for the current
				// frame
				bool newPlayingState;
				playlistFrame = fManager->PlaylistFrameAtFrame(fFrame,
					playingDirection, newPlayingState);
				TRACE("_FrameGeneratorThread: performance time: %Ld, "
					"playlist frame: %lld\n", performanceTime, playlistFrame);
				// A fresh playing state forces one buffer out even when
				// paused, so the display shows the current frame.
				forceSendingBuffer |= newPlayingState;
				fManager->SetCurrentVideoTime(nextPerformanceTime);
				fManager->Unlock();
				break;
			}
			case B_TIMED_OUT:
				TRACE("_FrameGeneratorThread: Couldn't lock the node "
					"manager.\n");
				// Wake immediately (timeout in the past) but skip the event.
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				break;
			default:
				ERROR("_FrameGeneratorThread: Couldn't lock the node manager. "
					"Terminating video producer frame generator thread.\n");
				TRACE("_FrameGeneratorThread: frame generator thread done.\n");
				// do not access any member variables, since this could
				// also mean the Node has been deleted
				return B_OK;
		}
		TRACE("_FrameGeneratorThread: waiting (%Ld)...\n", waitUntil);
		// wait until...
		err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT, waitUntil);
		// The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		// else means the thread should quit. Deleting the semaphore, as in
		// VideoProducer::_HandleStop(), will trigger this behavior.
		switch (err) {
			case B_OK:
				// Signalled: somebody wants us to re-evaluate, not to
				// produce a frame.
				TRACE("_FrameGeneratorThread: going back to sleep.\n");
				break;
			case B_TIMED_OUT:
				TRACE("_FrameGeneratorThread: timed out => event\n");
				// Catch the cases in which the node manager could not be
				// locked and we therefore have no valid data to work with,
				// or the producer is not running or enabled.
				if (ignoreEvent || !fRunning || !fEnabled) {
					TRACE("_FrameGeneratorThread: ignore event\n");
					// nothing to do
				} else if (!forceSendingBuffer
					&& nextWaitUntil < system_time() - fBufferLatency
					&& droppedFrames < kMaxDroppedFrames) {
					// Drop frame if it's at least a frame late.
					if (playingDirection > 0)
						printf("VideoProducer: dropped frame (%Ld)\n", fFrame);
					// next frame
					droppedFrames++;
					fFrame++;
				} else if (playingDirection != 0 || forceSendingBuffer) {
					// Send buffers only, if playing, the node is running and
					// the output has been enabled
					TRACE("_FrameGeneratorThread: produce frame\n");
					BAutolock _(fLock);
					// Fetch a buffer from the buffer group
					fUsedBufferGroup->WaitForBuffers();
					BBuffer* buffer = fUsedBufferGroup->RequestBuffer(
						fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count, 0LL);
					if (buffer == NULL) {
						// Wait until a buffer becomes available again
						ERROR("_FrameGeneratorThread: no buffer!\n");
						break;
					}
					// Fill out the details about this buffer.
					media_header* h = buffer->Header();
					h->type = B_MEDIA_RAW_VIDEO;
					h->time_source = TimeSource()->ID();
					h->size_used = fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count;
					// For a buffer originating from a device, you might
					// want to calculate this based on the
					// PerformanceTimeFor the time your buffer arrived at
					// the hardware (plus any applicable adjustments).
					h->start_time = fPerformanceTimeBase + performanceTime;
					h->file_pos = 0;
					h->orig_size = 0;
					h->data_offset = 0;
					h->u.raw_video.field_gamma = 1.0;
					h->u.raw_video.field_sequence = fFrame;
					h->u.raw_video.field_number = 0;
					h->u.raw_video.pulldown_number = 0;
					h->u.raw_video.first_active_line = 1;
					h->u.raw_video.line_count
						= fConnectedFormat.display.line_count;
					// Fill in a frame
					TRACE("_FrameGeneratorThread: frame: %Ld, "
						"playlistFrame: %Ld\n", fFrame, playlistFrame);
					bool wasCached = false;
					err = fSupplier->FillBuffer(playlistFrame,
						buffer->Data(), fConnectedFormat, forceSendingBuffer,
						wasCached);
					if (err == B_TIMED_OUT) {
						// Don't send the buffer if there was insufficient
						// time for rendering, this will leave the last
						// valid frame on screen until we catch up, instead
						// of going black.
						wasCached = true;
						err = B_OK;
					}
					// clean the buffer if something went wrong
					if (err != B_OK) {
						// TODO: should use "back value" according
						// to color space!
						memset(buffer->Data(), 0, h->size_used);
						err = B_OK;
					}
					// Send the buffer on down to the consumer
					// NOTE(review): by operator precedence "err =" receives
					// the result of the comparison (0/1), not the raw
					// SendBuffer() error code. The later "err != B_OK"
					// check still behaves correctly since B_OK == 0, but
					// the original error code is lost.
					if (wasCached || (err = SendBuffer(buffer, fOutput.source,
							fOutput.destination) != B_OK)) {
						// If there is a problem sending the buffer,
						// or if we don't send the buffer because its
						// contents are the same as the last one,
						// return it to its buffer group.
						buffer->Recycle();
						// we tell the supplier to delete
						// its caches if there was a problem sending
						// the buffer
						if (err != B_OK) {
							ERROR("_FrameGeneratorThread: Error "
								"sending buffer\n");
							fSupplier->DeleteCaches();
						}
					}
					// Only if everything went fine we clear the flag
					// that forces us to send a buffer even if not
					// playing.
					if (err == B_OK)
						forceSendingBuffer = false;
					// next frame
					fFrame++;
					droppedFrames = 0;
				} else {
					TRACE("_FrameGeneratorThread: not playing\n");
					// next frame
					fFrame++;
				}
				break;
			default:
				TRACE("_FrameGeneratorThread: Couldn't acquire semaphore. "
					"Error: %s\n", strerror(err));
				running = false;
				break;
		}
	}
	TRACE("_FrameGeneratorThread: frame generator thread done.\n");
	return B_OK;
}
// Manual test driver for BBufferGroup/BBuffer: exercises the size-based
// constructor, the buffer query methods, and sharing a buffer between two
// groups via its media_buffer_id. Results are printed for visual inspection.
int main()
{
	// app_server connection (no need to run it)
	BApplication app("application/x-vnd-test");
	BBufferGroup * group;
	status_t s;
	int32 count;
	BBuffer *buffer;
/*
	printf("using default constructor:\n");
	group = new BBufferGroup();
	s = group->InitCheck();
	printf("InitCheck: status = %ld\n",s);
	s = group->CountBuffers(&count);
	printf("CountBuffers: count = %ld, status = %ld\n",count,s);
	delete group;
*/
	printf("\n");
	printf("using size = 1234 constructor:\n");
	group = new BBufferGroup(1234);
	s = group->InitCheck();
	printf("InitCheck: status = %ld\n",s);
	s = group->CountBuffers(&count);
	printf("CountBuffers: count = %ld, status = %ld\n",count,s);
	s = group->GetBufferList(1,&buffer);
	printf("GetBufferList: status = %ld\n",s);
	// NOTE(review): 'buffer' is dereferenced below without checking the
	// GetBufferList() status -- acceptable in a throwaway test, but a
	// failure above would crash here.
	printf("Buffer->Data: = %08x\n",(int)buffer->Data());
	printf("Buffer->ID: = %d\n",(int)buffer->ID());
	printf("Buffer->Size: = %ld\n",buffer->Size());
	printf("Buffer->SizeAvailable: = %ld\n",buffer->SizeAvailable());
	printf("Buffer->SizeUsed: = %ld\n",buffer->SizeUsed());
	printf("\n");

	// Create a second group that references a buffer of the first group,
	// identified by its media_buffer_id.
	media_buffer_id id = buffer->ID();
	BBufferGroup * group2 = new BBufferGroup(1,&id);
	printf("creating second group with a buffer from first group:\n");
	s = group2->InitCheck();
	printf("InitCheck: status = %ld\n",s);
	s = group2->CountBuffers(&count);
	printf("CountBuffers: count = %ld, status = %ld\n",count,s);
	buffer = 0;
	s = group2->GetBufferList(1,&buffer);
	printf("GetBufferList: status = %ld\n",s);
	printf("Buffer->Data: = %08x\n",(int)buffer->Data());
	printf("Buffer->ID: = %d\n",(int)buffer->ID());
	printf("Buffer->Size: = %ld\n",buffer->Size());
	printf("Buffer->SizeAvailable: = %ld\n",buffer->SizeAvailable());
	printf("Buffer->SizeUsed: = %ld\n",buffer->SizeUsed());
	delete group;
	delete group2;
	printf("\n");
/*
	printf("creating a BSmallBuffer:\n");
	BSmallBuffer * sb = new BSmallBuffer;
	printf("sb->Data: = %08x\n",(int)sb->Data());
	printf("sb->ID: = %d\n",(int)sb->ID());
	printf("sb->Size: = %ld\n",sb->Size());
	printf("sb->SizeAvailable: = %ld\n",sb->SizeAvailable());
	printf("sb->SizeUsed: = %ld\n",sb->SizeUsed());
	printf("sb->SmallBufferSizeLimit: = %ld\n",sb->SmallBufferSizeLimit());
	delete sb;
*/
	return 0;
}
/*
 * Body of the mixer's mix thread.  Loops forever: waits until the next
 * buffer's performance time (minus latencies), gathers per-channel source
 * info from every MixerInput, mixes everything into fMixBuffer, resamples
 * into an output buffer and sends it downstream.  Exits when the time
 * source lock cannot be taken or when waiting on fMixThreadWaitSem fails
 * with a real error (e.g. the semaphore was deleted on shutdown).
 */
void MixerCore::_MixThread()
{
	// The broken BeOS R5 multiaudio node starts with time 0,
	// then publishes negative times for about 50ms, publishes 0
	// again until it finally reaches time values > 0
	if (!LockFromMixThread())
		return;
	bigtime_t start = fTimeSource->Now();
	Unlock();
	while (start <= 0) {
		TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n",
			start);
		snooze(5000);
		if (!LockFromMixThread())
			return;
		start = fTimeSource->Now();
		Unlock();
	}

	if (!LockFromMixThread())
		return;
	// Internal latency: at least 3.6ms, otherwise 40% of one output buffer's
	// duration.
	bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio)));

	// TODO: when the format changes while running, everything is wrong!
	bigtime_t bufferRequestTimeout = buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio) / 2;

	TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and "
		"downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency,
		fDownstreamLatency, bufferRequestTimeout);

	// We must read from the input buffer at a position (pos) that is always
	// a multiple of fMixBufferFrameCount.
	int64 temp = frames_for_duration(fMixBufferFrameRate, start);
	// Round up to the next multiple so the first event lies in the future.
	int64 frameBase = ((temp / fMixBufferFrameCount) + 1)
		* fMixBufferFrameCount;
	bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase);
	Unlock();

	TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, "
		"frameBase %Ld\n", start, timeBase, frameBase);

	ASSERT(fMixBufferFrameCount > 0);

#if DEBUG
	uint64 bufferIndex = 0;
#endif

	// Per channel-type list of contributing sources, rebuilt every iteration.
	typedef RtList<chan_info> chan_info_list;
	chan_info_list inputChanInfos[MAX_CHANNEL_TYPES];
	BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount);
		// TODO: this does not support changing output channel count

	bigtime_t eventTime = timeBase;
	int64 framePos = 0;
	for (;;) {
		if (!LockFromMixThread())
			return;
		// Real time at which this buffer must be produced so that it arrives
		// downstream on time.
		bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0)
			- latency - fDownstreamLatency;
		Unlock();
		// The semaphore doubles as a wakeup signal: normally the wait times
		// out (proceed with mixing); an acquired semaphore or interruption
		// restarts the wait; any other error terminates the thread.
		status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT,
			waitUntil);
		if (rv == B_INTERRUPTED)
			continue;
		if (rv != B_TIMED_OUT && rv < B_OK)
			return;

		if (!LockWithTimeout(10000)) {
			ERROR("MixerCore: LockWithTimeout failed\n");
			continue;
		}

		// no inputs or output muted, skip further processing and just send an
		// empty buffer
		if (fInputs->IsEmpty() || fOutput->IsMuted()) {
			int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			BBuffer* buffer = fBufferGroup->RequestBuffer(size,
				bufferRequestTimeout);
			if (buffer != NULL) {
				// Silence is all zeroes for float/int formats.
				memset(buffer->Data(), 0, size);
				// fill in the buffer header
				media_header* hdr = buffer->Header();
				hdr->type = B_MEDIA_RAW_AUDIO;
				hdr->size_used = size;
				hdr->time_source = fTimeSource->ID();
				hdr->start_time = eventTime;
				if (fNode->SendBuffer(buffer, fOutput) != B_OK) {
#if DEBUG
					ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
						bufferIndex);
#else
					ERROR("MixerCore: SendBuffer failed\n");
#endif
					// We still own the buffer on failure; recycle it.
					buffer->Recycle();
				}
			} else {
#if DEBUG
				ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: RequestBuffer failed\n");
#endif
			}
			// Skip the mixing path but still advance the event time (the
			// label also performs the Unlock matching LockWithTimeout above).
			goto schedule_next_event;
		}

		int64 currentFramePos;
		currentFramePos = frameBase + framePos;

		// mix all data from all inputs into the mix buffer
		ASSERT(currentFramePos % fMixBufferFrameCount == 0);

		PRINT(4, "create new buffer event at %Ld, reading input frames at "
			"%Ld\n", eventTime, currentFramePos);

		// Init the channel information for each MixerInput.
		for (int i = 0; MixerInput* input = Input(i); i++) {
			int count = input->GetMixerChannelCount();
			for (int channel = 0; channel < count; channel++) {
				int type;
				const float* base;
				uint32 sampleOffset;
				float gain;
				// GetMixerChannelInfo returning false means this channel has
				// no data for this position; skip it.
				if (!input->GetMixerChannelInfo(channel, currentFramePos,
						eventTime, &base, &sampleOffset, &type, &gain)) {
					continue;
				}
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				chan_info* info = inputChanInfos[type].Create();
				info->base = (const char*)base;
				info->sample_offset = sampleOffset;
				info->gain = gain;
			}
		}

		// For every output channel, collect the input sources (by channel
		// type) that feed it, combining input gain with the source gain.
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			int sourceCount = fOutput->GetOutputChannelSourceCount(channel);
			for (int i = 0; i < sourceCount; i++) {
				int type;
				float gain;
				fOutput->GetOutputChannelSourceInfoAt(channel, i, &type,
					&gain);
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				int count = inputChanInfos[type].CountItems();
				for (int j = 0; j < count; j++) {
					chan_info* info = inputChanInfos[type].ItemAt(j);
					chan_info* newInfo = mixChanInfos[channel].Create();
					newInfo->base = info->base;
					newInfo->sample_offset = info->sample_offset;
					newInfo->gain = info->gain * gain;
				}
			}
		}

		// Accumulate all sources into the (interleaved float) mix buffer.
		memset(fMixBuffer, 0,
			fMixBufferChannelCount * fMixBufferFrameCount * sizeof(float));
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			PRINT(5, "_MixThread: channel %d has %d sources\n", channel,
				mixChanInfos[channel].CountItems());

			int count = mixChanInfos[channel].CountItems();
			for (int i = 0; i < count; i++) {
				chan_info* info = mixChanInfos[channel].ItemAt(i);
				PRINT(5, "_MixThread: base %p, sample-offset %2d, gain %.3f\n",
					info->base, info->sample_offset, info->gain);
				// This looks slightly ugly, but the current GCC will generate
				// the fastest code this way.
				// fMixBufferFrameCount is always > 0.
				uint32 dstSampleOffset = fMixBufferChannelCount * sizeof(float);
				uint32 srcSampleOffset = info->sample_offset;
				register char* dst = (char*)&fMixBuffer[channel];
				register char* src = (char*)info->base;
				register float gain = info->gain;
				register int j = fMixBufferFrameCount;
				do {
					*(float*)dst += *(const float*)src * gain;
					dst += dstSampleOffset;
					src += srcSampleOffset;
				} while (--j);
			}
		}

		// request a buffer
		BBuffer* buffer;
		buffer = fBufferGroup->RequestBuffer(
			fOutput->MediaOutput().format.u.raw_audio.buffer_size,
			bufferRequestTimeout);
		if (buffer != NULL) {
			// copy data from mix buffer into output buffer
			// (one resampler per channel converts rate and sample format)
			for (int i = 0; i < fMixBufferChannelCount; i++) {
				fResampler[i]->Resample(
					reinterpret_cast<char*>(fMixBuffer) + i * sizeof(float),
					fMixBufferChannelCount * sizeof(float),
					fMixBufferFrameCount,
					reinterpret_cast<char*>(buffer->Data())
						+ (i * bytes_per_sample(
							fOutput->MediaOutput().format.u.raw_audio)),
					bytes_per_frame(fOutput->MediaOutput().format.u.raw_audio),
					frames_per_buffer(
						fOutput->MediaOutput().format.u.raw_audio),
					fOutputGain * fOutput->GetOutputChannelGain(i));
			}
			PRINT(4, "send buffer, inframes %ld, outframes %ld\n",
				fMixBufferFrameCount,
				frames_per_buffer(fOutput->MediaOutput().format.u.raw_audio));

			// fill in the buffer header
			media_header* hdr = buffer->Header();
			hdr->type = B_MEDIA_RAW_AUDIO;
			hdr->size_used
				= fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			hdr->time_source = fTimeSource->ID();
			hdr->start_time = eventTime;

			// swap byte order if necessary
			fOutput->AdjustByteOrder(buffer);

			// send the buffer
			status_t res = fNode->SendBuffer(buffer, fOutput);
			if (res != B_OK) {
#if DEBUG
				ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: SendBuffer failed\n");
#endif
				// Buffer was not consumed downstream; return it to the group.
				buffer->Recycle();
			}
		} else {
#if DEBUG
			ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
				bufferIndex);
#else
			ERROR("MixerCore: RequestBuffer failed\n");
#endif
		}

		// make all lists empty
		for (int i = 0; i < MAX_CHANNEL_TYPES; i++)
			inputChanInfos[i].MakeEmpty();
		for (int i = 0; i < fOutput->GetOutputChannelCount(); i++)
			mixChanInfos[i].MakeEmpty();

schedule_next_event:
		// schedule next event
		// Recompute eventTime from framePos each time (instead of adding a
		// fixed delta) so rounding error does not accumulate.
		framePos += fMixBufferFrameCount;
		eventTime = timeBase + bigtime_t((1000000LL * framePos)
			/ fMixBufferFrameRate);
		Unlock();
#if DEBUG
		bufferIndex++;
#endif
	}
}
/* The following functions form the thread that generates frames. You should
 * replace this with the code that interfaces to your hardware. */

/*
 * Frame generator thread body: waits (on fFrameSync with an absolute
 * timeout) until the next frame is due, fetches a buffer from the buffer
 * group, fills it from the camera device and sends it downstream.  The
 * loop exits when acquiring fFrameSync fails with anything other than
 * B_OK/B_TIMED_OUT (e.g. the semaphore was deleted in HandleStop()).
 * Returns B_OK as the thread exit value.
 */
int32
VideoProducer::FrameGenerator()
{
	bigtime_t wait_until = system_time();

	while (1) {
		PRINTF(1, ("FrameGenerator: acquire_sem_etc() until %Ldµs (in %Ldµs)\n", wait_until, wait_until - system_time()));
		status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
				wait_until);

		/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		 * else means the thread should quit. Deleting the semaphore, as in
		 * VideoProducer::HandleStop(), will trigger this behavior. */
		if ((err != B_OK) && (err != B_TIMED_OUT))
			break;

		fFrame++;

		/* Recalculate the time until the thread should wake up to begin
		 * processing the next frame. Subtract fProcessingLatency so that
		 * the frame is sent in time. */
		wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0)
				+ (bigtime_t)
						((fFrame - fFrameBase)
						* (1000000 / fConnectedFormat.field_rate))
				- fProcessingLatency;

		PRINT(("PS: %Ld\n", fProcessingLatency));

		/* Drop frame if it's at least a frame late */
		if (wait_until < system_time())
			continue;

		PRINTF(1, ("FrameGenerator: wait until %Ld, %ctimed out, %crunning, %cenabled.\n",
					wait_until,
					(err == B_OK)?'!':' ',
					(fRunning)?' ':'!',
					(fEnabled)?' ':'!'));

		/* If the semaphore was acquired successfully, it means something
		 * changed the timing information (see VideoProducer::Connect()) and
		 * so the thread should go back to sleep until the newly-calculated
		 * wait_until time. */
		if (err == B_OK)
			continue;

		/* Send buffers only if the node is running and the output has been
		 * enabled */
		if (!fRunning || !fEnabled)
			continue;

		BAutolock _(fLock);

		/* Fetch a buffer from the buffer group; 4 bytes per pixel (raw
		 * 32-bit video). A zero timeout means don't wait — just skip the
		 * frame when no buffer is available. */
		BBuffer *buffer = fBufferGroup->RequestBuffer(
						4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count, 0LL);
		if (!buffer)
			continue;

		/* Fill out the details about this buffer. */
		media_header *h = buffer->Header();
		h->type = B_MEDIA_RAW_VIDEO;
		h->time_source = TimeSource()->ID();
		h->size_used = 4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count;
		/* For a buffer originating from a device, you might want to calculate
		 * this based on the PerformanceTimeFor the time your buffer arrived at
		 * the hardware (plus any applicable adjustments). */
		/*
		h->start_time = fPerformanceTimeBase +
				(bigtime_t)
					((fFrame - fFrameBase) *
						(1000000 / fConnectedFormat.field_rate));
		*/
		h->file_pos = 0;
		h->orig_size = 0;
		h->data_offset = 0;
		h->u.raw_video.field_gamma = 1.0;
		h->u.raw_video.field_sequence = fFrame;
		h->u.raw_video.field_number = 0;
		h->u.raw_video.pulldown_number = 0;
		h->u.raw_video.first_active_line = 1;
		h->u.raw_video.line_count = fConnectedFormat.display.line_count;

		// This is where we fill the video buffer.

#if 0
		uint32 *p = (uint32 *)buffer->Data();
		/* Fill in a pattern */
		for (uint32 y=0;y<fConnectedFormat.display.line_count;y++)
			for (uint32 x=0;x<fConnectedFormat.display.line_width;x++)
				*(p++) = ((((x+y)^0^x)+fFrame) & 0xff) * (0x01010101 & fColor);
#endif

		//NO! must be called without lock!
		//BAutolock lock(fCamDevice->Locker());

		bigtime_t now = system_time();

		bigtime_t stamp;
//#ifdef UseFillFrameBuffer
		err = fCamDevice->FillFrameBuffer(buffer, &stamp);
		if (err < B_OK) {
			;//XXX handle error
			fStats[0].missed++;
		}
//#endif
#ifdef UseGetFrameBitmap
		BBitmap *bm;
		err = fCamDevice->GetFrameBitmap(&bm, &stamp);
		// Count only failures as missed (was inverted: err >= B_OK counted
		// successful frames), mirroring the FillFrameBuffer path above.
		if (err < B_OK) {
			;//XXX handle error
			fStats[0].missed++;
		}
#endif

		fStats[0].frames = fFrame;
		fStats[0].actual++;
		fStats[0].stamp = system_time();

		//PRINTF(1, ("FrameGenerator: stamp %Ld vs %Ld\n", stamp, h->start_time));
		//XXX: that's what we should be doing, but CodyCam drops all frames as they are late. (maybe add latency ??)
		//h->start_time = TimeSource()->PerformanceTimeFor(stamp);
		h->start_time = TimeSource()->PerformanceTimeFor(system_time());

		// update processing latency
		// XXX: should I ??
		fProcessingLatency = system_time() - now;
		fProcessingLatency /= 10;

		PRINTF(1, ("FrameGenerator: SendBuffer...\n"));
		/* Send the buffer on down to the consumer */
		if (SendBuffer(buffer, fOutput.source, fOutput.destination) < B_OK) {
			PRINTF(-1, ("FrameGenerator: Error sending buffer\n"));
			/* If there is a problem sending the buffer, return it to its
			 * buffer group. */
			buffer->Recycle();
		}

		_UpdateStats();
	}

	PRINTF(1, ("FrameGenerator: thread exited.\n"));

	return B_OK;
}
/* The following functions form the thread that generates frames. You should
 * replace this with the code that interfaces to your hardware. */

/*
 * Frame generator thread body for the FinePix camera: waits (on fFrameSync
 * with an absolute timeout) until the next frame is due, grabs a JPEG frame
 * from the camera, decodes it to 24-bit RGB, expands it to 32-bit into a
 * media buffer and sends the buffer downstream.  The loop exits when
 * acquiring fFrameSync fails with anything other than B_OK/B_TIMED_OUT
 * (e.g. the semaphore was deleted in HandleStop()).  Returns B_OK as the
 * thread exit value.
 */
int32
FinePixProducer::FrameGenerator()
{
	bigtime_t wait_until = system_time();

	while (1) {
		status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
				wait_until);

		/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		 * else means the thread should quit. Deleting the semaphore, as in
		 * FinePixProducer::HandleStop(), will trigger this behavior. */
		if ((err != B_OK) && (err != B_TIMED_OUT))
			break;

		fFrame++;

		/* Recalculate the time until the thread should wake up to begin
		 * processing the next frame. Subtract fProcessingLatency so that
		 * the frame is sent in time. */
		wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0) +
				(bigtime_t)
						((fFrame - fFrameBase) *
						(1000000 / fConnectedFormat.field_rate)) -
				fProcessingLatency;

		/* Drop frame if it's at least a frame late */
		if (wait_until < system_time())
			continue;

		/* If the semaphore was acquired successfully, it means something
		 * changed the timing information (see FinePixProducer::Connect()) and
		 * so the thread should go back to sleep until the newly-calculated
		 * wait_until time. */
		if (err == B_OK)
			continue;

		/* Send buffers only if the node is running and the output has been
		 * enabled */
		if (!fRunning || !fEnabled)
			continue;

		BAutolock _(fLock);

		// Get the frame from the camera
		// NOTE(review): this happens before RequestBuffer(), so a frame is
		// fetched (and discarded) even when no buffer is available below.
		fCam->GetPic(fDeltaBuffer, frame_size);

		/* Fetch a buffer from the buffer group; 4 bytes per pixel (raw
		 * 32-bit video), zero timeout — skip the frame if none is free. */
		BBuffer *buffer = fBufferGroup->RequestBuffer(
						4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count, 0LL);
		if (!buffer)
			continue;

		/* Fill out the details about this buffer. */
		media_header *h = buffer->Header();
		h->type = B_MEDIA_RAW_VIDEO;
		h->time_source = TimeSource()->ID();
		h->size_used = 4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count;
		/* For a buffer originating from a device, you might want to calculate
		 * this based on the PerformanceTimeFor the time your buffer arrived at
		 * the hardware (plus any applicable adjustments).
		h->start_time = fPerformanceTimeBase +
				(bigtime_t)
					((fFrame - fFrameBase) *
						(1000000 / fConnectedFormat.field_rate));*/
		h->start_time = TimeSource()->Now();
		h->file_pos = 0;
		h->orig_size = 0;
		h->data_offset = 0;
		h->u.raw_video.field_gamma = 1.0;
		h->u.raw_video.field_sequence = fFrame;
		h->u.raw_video.field_number = 0;
		h->u.raw_video.pulldown_number = 0;
		h->u.raw_video.first_active_line = 1;
		h->u.raw_video.line_count = fConnectedFormat.display.line_count;

		// Frame data pointers
		uint8 *tmp24 = (uint8*)tempInBuffer;
		uint8 *dst = (uint8*)buffer->Data();

		// Convert from jpeg to bitmap
		// (decode failure is only logged; the stale contents of
		// tempInBuffer are then expanded and sent anyway)
		if (jpeg_check_size(fDeltaBuffer,
				FPIX_RGB24_WIDTH, FPIX_RGB24_HEIGHT)) {
			int n = jpeg_decode(fDeltaBuffer, tmp24, FPIX_RGB24_WIDTH,
					FPIX_RGB24_HEIGHT, 24, //32 not working
					&decdata);
			if (n) {
				PRINTF(-1, ("ooeps decode jpg result : %d", n));
			}
		} else {
			PRINTF(-1, ("ooeps check_size failed"));
		}

		// Convert from 24 bit to 32 bit
		// NOTE(review): bytes are copied in R,G,B order into the 32-bit
		// pixel; whether this matches the connected color space's channel
		// order (e.g. B_RGB32 is little-endian BGRA) is not verifiable from
		// here — confirm against the negotiated format.
		for (uint y=0; y<fConnectedFormat.display.line_count; y++)
			for (uint x=0; x<fConnectedFormat.display.line_width; x++) {
				*(dst++) = *tmp24; //red
				tmp24++;
				*(dst++) = *tmp24; //green
				tmp24++;
				*(dst++) = *tmp24; //blue
				tmp24++;
				dst++; //last 8 bit empty
			}

		/* Send the buffer on down to the consumer */
		if (SendBuffer(buffer, fOutput.destination) < B_OK) {
			PRINTF(-1, ("FrameGenerator: Error sending buffer\n"));
			/* If there is a problem sending the buffer, return it to its
			 * buffer group. */
			buffer->Recycle();
		}
	}

	return B_OK;
}