Example #1
void
ProducerNode::BufferProducer()
{
	// this thread produces one buffer every two seconds,
	// and schedules it to be handled one second later than produced
	// assuming a realtime timesource

	status_t rv;
	for (;;) {
		rv = acquire_sem_etc(mBufferProducerSem, 1, B_RELATIVE_TIMEOUT, DELAY);
		if (rv == B_INTERRUPTED) {
			continue;
		} else if (rv == B_OK) {
			// triggered by AdditionalBufferRequested
			release_sem(mBufferProducerSem);
		} else if (rv != B_TIMED_OUT) {
			// triggered by deleting the semaphore (stop request)
			break;
		}
		if (!mOutputEnabled)
			continue;
			
		BBuffer *buffer;
//		out("ProducerNode: RequestBuffer\n");
		buffer = mBufferGroup->RequestBuffer(2048);
		if (!buffer) {
			// couldn't get a buffer; skip this round
			continue;
		}
		buffer->Header()->start_time = TimeSource()->Now() + DELAY / 2;
		out("ProducerNode: SendBuffer, sheduled time = %5.4f\n",buffer->Header()->start_time / 1E6);
		rv = SendBuffer(buffer, mOutput.destination);
		if (rv != B_OK) {
			// the consumer didn't take the buffer; reclaim it
			buffer->Recycle();
		}
	}
}
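
The loop above wakes early whenever mBufferProducerSem is released. A minimal sketch of the companion hook that does this, assuming the standard BBufferProducer override (not part of the original example):

void
ProducerNode::AdditionalBufferRequested(const media_source& source,
	media_buffer_id prevBuffer, bigtime_t prevTime,
	const media_seek_tag* prevTag)
{
	// a consumer ran dry; wake BufferProducer() so it sends a buffer
	// right away instead of waiting out the full DELAY
	release_sem(mBufferProducerSem);
}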
Example #2
// figure processing latency by doing 'dry runs' of filterBuffer()
bigtime_t StepMotionBlurFilter::calcProcessingLatency() {
	PRINT(("StepMotionBlurFilter::calcProcessingLatency()\n"));
	
	if(m_output.destination == media_destination::null) {
		PRINT(("\tNot connected.\n"));
		return 0LL;
	}
	
	// allocate a temporary buffer group
	BBufferGroup* pTestGroup = new BBufferGroup(
		m_output.format.u.raw_video.display.line_width
		* m_output.format.u.raw_video.display.line_count * 4, 1);
	
	// fetch a buffer
	BBuffer* pBuffer = pTestGroup->RequestBuffer(m_output.format.u.raw_video.display.line_width * m_output.format.u.raw_video.display.line_count * 4);
	ASSERT(pBuffer);
	
	pBuffer->Header()->type = B_MEDIA_RAW_VIDEO;
	pBuffer->Header()->size_used = m_output.format.u.raw_video.display.line_width * m_output.format.u.raw_video.display.line_count * 4;
	
	// run the test
	bigtime_t preTest = system_time();
	filterBuffer(pBuffer);
	bigtime_t elapsed = system_time()-preTest;
	
	// clean up
	pBuffer->Recycle();
	delete pTestGroup;

	// reset filter state
	initFilter();

	return elapsed;
}
Example #3
bigtime_t
EqualizerNode::GetFilterLatency(void)
{
    if (fOutputMedia.destination == media_destination::null)
        return 0LL;

    BBufferGroup* test_group =
        new BBufferGroup(fOutputMedia.format.u.raw_audio.buffer_size, 1);

    BBuffer* buffer =
        test_group->RequestBuffer(fOutputMedia.format.u.raw_audio.buffer_size);
    buffer->Header()->type = B_MEDIA_RAW_AUDIO;
    buffer->Header()->size_used = fOutputMedia.format.u.raw_audio.buffer_size;

    bigtime_t begin = system_time();
    FilterBuffer(buffer);
    bigtime_t latency = system_time() - begin;

    buffer->Recycle();
    delete test_group;

    InitFilter();

    return latency;
}
Example #4
File: FlangerNode.cpp  Project: DonCN/haiku
// figure processing latency by doing 'dry runs' of filterBuffer()
bigtime_t FlangerNode::calcProcessingLatency() {
	PRINT(("FlangerNode::calcProcessingLatency()\n"));

	if(m_output.destination == media_destination::null) {
		PRINT(("\tNot connected.\n"));
		return 0LL;
	}

	// allocate a temporary buffer group
	BBufferGroup* pTestGroup = new BBufferGroup(
		m_output.format.u.raw_audio.buffer_size, 1);

	// fetch a buffer
	BBuffer* pBuffer = pTestGroup->RequestBuffer(
		m_output.format.u.raw_audio.buffer_size);
	ASSERT(pBuffer);

	pBuffer->Header()->type = B_MEDIA_RAW_AUDIO;
	pBuffer->Header()->size_used = m_output.format.u.raw_audio.buffer_size;

	// run the test
	bigtime_t preTest = system_time();
	filterBuffer(pBuffer);
	bigtime_t elapsed = system_time()-preTest;

	// clean up
	pBuffer->Recycle();
	delete pTestGroup;

	// reset filter state
	initFilter();

	return elapsed;
}
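
The latency measured by such a dry run is typically folded into the node's advertised event latency once the connection is established. A minimal sketch, assuming a BMediaEventLooper-derived node with a fDownstreamLatency member (hypothetical name):

	// inside Connect(), after the output format has been fixed:
	fProcessingLatency = calcProcessingLatency();
	SetEventLatency(fDownstreamLatency + fProcessingLatency
		+ SchedulingLatency());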
Example #5
BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	// get a buffer from our buffer group
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// If we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread
	if (buffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(buffer->Data(),
			fOutput.format.u.raw_audio.buffer_size, fOutput.format.u.raw_audio);
	} else
		memset(buffer->Data(), 0, fOutput.format.u.raw_audio.buffer_size);

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();
	header->start_time = eventTime;

	return buffer;
}
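
Note that the second argument to RequestBuffer() is a timeout in microseconds: by waiting at most half a buffer duration, the node drops one cycle on failure instead of stalling its control thread indefinitely.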
Example #6
// how should we handle late buffers?  drop them?
// notify the producer?
status_t ESDSinkNode::HandleBuffer(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent)
{
	CALLED();
	BBuffer * buffer = const_cast<BBuffer*>((BBuffer*)event->pointer);
	if (buffer == 0) {
		fprintf(stderr,"<- B_BAD_VALUE\n");
		return B_BAD_VALUE;
	}
	
	if(fInput.destination.id != buffer->Header()->destination) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}
	
	media_header* hdr = buffer->Header();
	bigtime_t now = TimeSource()->Now();
	bigtime_t perf_time = hdr->start_time;
	
	// the how_early calculated here doesn't include scheduling latency because
	// we've already been scheduled to handle the buffer
	bigtime_t how_early = perf_time - EventLatency() - now;
	
	// if the buffer is late, we ignore it and report the fact to the producer
	// who sent it to us
	if ((RunMode() != B_OFFLINE) &&				// lateness doesn't matter in offline mode...
		(RunMode() != B_RECORDING) &&		// ...or in recording mode
		(how_early < 0LL))
	{
		//mLateBuffers++;
		NotifyLateProducer(fInput.source, -how_early, perf_time);
		fprintf(stderr,"	<- LATE BUFFER : %lli\n", how_early);
		buffer->Recycle();
	} else {
		if (fDevice->CanSend())
			fDevice->Write(buffer->Data(), buffer->SizeUsed());
	}
	return B_OK;
}
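
To make the lateness test concrete: with EventLatency() at 5000 µs and a buffer stamped 2000 µs past the current performance time, how_early = 2000 - 5000 = -3000 µs, so the buffer is recycled and the producer notified (except in B_OFFLINE and B_RECORDING modes, where lateness is ignored).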
Example #7
void
FireWireDVNode::card_reader_thread()
{
	status_t err;
	size_t rbufsize;
	int rcount;

	fCard->GetBufInfo(&rbufsize, &rcount);
	delete fBufferGroupEncVideo;
	fBufferGroupEncVideo = new BBufferGroup(rbufsize, rcount);
	while (!fTerminateThreads) {
		void *data, *end;
		ssize_t sizeUsed = fCard->Read(&data);
		if (sizeUsed < 0) {
			TRACE("FireWireDVNode::%s: %s\n", __FUNCTION__,
				strerror(sizeUsed));
			continue;
		}

		end = (char*)data + sizeUsed;

		while (data < end) {
			BBuffer* buf = fBufferGroupEncVideo->RequestBuffer(rbufsize, 10000);
			if (!buf) {
				TRACE("OutVideo: request buffer timout\n");
				continue;
			}
			
			err = fCard->Extract(buf->Data(), &data, &sizeUsed);
			if (err) {
				buf->Recycle();
				printf("OutVideo Extract error %s\n", strerror(err));
				continue;
			}
	
			media_header* hdr = buf->Header();
			hdr->type = B_MEDIA_ENCODED_VIDEO;
			hdr->size_used = sizeUsed;
			hdr->time_source = TimeSource()->ID();	// set time source id
			//what should the start_time be?
			hdr->start_time = TimeSource()->PerformanceTimeFor(system_time());

			fLock.Lock();
			if (SendBuffer(buf, fOutputEncVideo.source,
					fOutputEncVideo.destination) != B_OK) {
				TRACE("OutVideo: sending buffer failed\n");
				buf->Recycle();
			} 
			fLock.Unlock();
		}
		
	}
}
Example #8
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = fBufferGroup->RequestBuffer(fBufferSize, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread.
	if (!buf)
		return NULL;

	// we need to describe the buffer
	int64 frames = int64(fBufferSize / fFrameSize);
	memset(buf->Data(), 0, fBufferSize);

	// now fill the buffer with data, continuing where the last buffer left off
	fObject->Play(buf->Data(), frames);

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = fBufferSize;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time.  We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been created.
		stamp = event_time;
	} else {
		// okay, we're in one of the "live" performance run modes.  in these
		// modes, we stamp the buffer with the time at which the buffer should
		// be rendered to the output, not with the capture time. fStartTime is
		// the cached value of the first buffer's performance time; we calculate
		// this buffer's performance time as an offset from that time, based on
		// the amount of media we've created so far.
		// Recalculating every buffer like this avoids accumulation of error.
		stamp = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
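
For example, at a frame_rate of 44100.0 with fFramesSent == 88200, the stamp works out to fStartTime + 2000000 µs: exactly two seconds of media after the first buffer, independent of when earlier buffers were actually produced.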
Example #9
// figure processing latency by doing 'dry runs' of processBuffer()
bigtime_t AudioFilterNode::calcProcessingLatency() {

	PRINT(("AudioFilterNode::calcProcessingLatency()\n"));
	
	ASSERT(m_input.source != media_source::null);
	ASSERT(m_output.destination != media_destination::null);
	ASSERT(m_op);

	// initialize filter
	m_op->init();

	size_t maxSize = max_c(
		m_input.format.u.raw_audio.buffer_size,
		m_output.format.u.raw_audio.buffer_size);

	// allocate a temporary buffer group
	BBufferGroup* testGroup = new BBufferGroup(
		maxSize, 1);
	
	// fetch a buffer big enough for in-place processing
	BBuffer* buffer = testGroup->RequestBuffer(
		maxSize, -1);
	ASSERT(buffer);
	
	buffer->Header()->type = B_MEDIA_RAW_AUDIO;
	buffer->Header()->size_used = m_input.format.u.raw_audio.buffer_size;
	
	// run the test
	bigtime_t preTest = system_time();
	processBuffer(buffer, buffer);
	bigtime_t elapsed = system_time()-preTest;
	
	// clean up
	buffer->Recycle();
	delete testGroup;

	// reset filter state
	m_op->init();

	return elapsed;// + 100000LL;
}
Example #10
BBuffer*
ClientNode::FillNextBuffer(bigtime_t eventTime, JackPort* port)
{
    //printf("FillNextBuffer\n");

    BBuffer* buffer = port->CurrentBuffer();

    media_header* header = buffer->Header();
    header->type = B_MEDIA_RAW_AUDIO;
    header->size_used = fFormat.u.raw_audio.buffer_size;
    header->time_source = TimeSource()->ID();

    bigtime_t start;
    if (RunMode() == B_RECORDING)
        start = eventTime;
    else
        start = fTime + bigtime_t(double(fFramesSent)
                                  / double(fFormat.u.raw_audio.frame_rate) * 1000000.0);

    header->start_time = start;

    return buffer;
}
Example #11
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = mBufferGroup->RequestBuffer(mOutput.format.u.raw_audio.buffer_size, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out), we skip this
	// buffer and go on to the next, to avoid locking up the control thread
	if (!buf)
	{
		return NULL;
	}

	// now fill it with data, continuing where the last buffer left off
	// 20sep99: multichannel support

	size_t numFrames =
		mOutput.format.u.raw_audio.buffer_size /
		(sizeof(float)*mOutput.format.u.raw_audio.channel_count);
	bool stereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if(!stereo) {
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	}
//	PRINT(("buffer: %ld, %ld frames, %s\n", mOutput.format.u.raw_audio.buffer_size, numFrames, stereo ? "stereo" : "mono"));

	float* data = (float*) buf->Data();

	switch (mWaveform)
	{
	case SINE_WAVE:
		FillSineBuffer(data, numFrames, stereo);
		break;

	case TRIANGLE_WAVE:
		FillTriangleBuffer(data, numFrames, stereo);
		break;

	case SAWTOOTH_WAVE:
		FillSawtoothBuffer(data, numFrames, stereo);
		break;
	}

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING)
	{
		// In B_RECORDING mode, we stamp with the capture time.  We're not
		// really a hardware capture node, but we simulate it by using the (precalculated)
		// time at which this buffer "should" have been created.
		stamp = event_time;
	}
	else
	{
		// okay, we're in one of the "live" performance run modes.  in these modes, we
		// stamp the buffer with the time at which the buffer should be rendered to the
		// output, not with the capture time.  mStartTime is the cached value of the
		// first buffer's performance time; we calculate this buffer's performance time as
		// an offset from that time, based on the amount of media we've created so far.
		// Recalculating every buffer like this avoids accumulation of error.
		stamp = mStartTime + bigtime_t(double(mFramesSent) / double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
Example #12
void AudioFilterNode::BufferReceived(
	BBuffer*										buffer) {
	ASSERT(buffer);

	// check buffer destination
	if(buffer->Header()->destination !=
		m_input.destination.id) {
		PRINT(("AudioFilterNode::BufferReceived():\n"
			"\tBad destination.\n"));
		buffer->Recycle();
		return;
	}
	
	if(buffer->Header()->time_source != TimeSource()->ID()) { // +++++ no-go in offline mode
		PRINT(("* timesource mismatch\n"));
	}

	// check output
	if(m_output.destination == media_destination::null ||
		!m_outputEnabled) {
		buffer->Recycle();
		return;
	}
	
//	// +++++ [9sep99]
//	bigtime_t now = TimeSource()->Now();
//	bigtime_t delta = now - m_tpLastReceived;
//	m_tpLastReceived = now;
//	PRINT((
//		"### delta: %Ld (%Ld)\n",
//		delta, buffer->Header()->start_time - now));

	// fetch outbound buffer if needed
	BBuffer* outBuffer;
	if(m_bufferGroup) {
		outBuffer = m_bufferGroup->RequestBuffer(
			m_output.format.u.raw_audio.buffer_size, -1);
		ASSERT(outBuffer);
		
		// prepare outbound buffer
		outBuffer->Header()->type = B_MEDIA_RAW_AUDIO;

		// copy start time info from upstream node
		// +++++ is this proper, or should the next buffer-start be
		//       continuously tracked (figured from Start() or the first
		//       buffer received?)
		outBuffer->Header()->time_source = buffer->Header()->time_source;
		outBuffer->Header()->start_time = buffer->Header()->start_time;
	}
	else {
		// process inplace
		outBuffer = buffer;
	}
			
	// process and retransmit buffer
	processBuffer(buffer, outBuffer);

	status_t err = SendBuffer(outBuffer, m_output.source, m_output.destination);
	if (err < B_OK) {
		PRINT(("AudioFilterNode::BufferReceived():\n"
			"\tSendBuffer() failed: %s\n", strerror(err)));
		outBuffer->Recycle();
	}

	// free inbound buffer if data was copied	
	if(buffer != outBuffer)
		buffer->Recycle();

//	//####resend
//	SendBuffer(buffer, m_output.destination);

	// sent!
}
Example #13
int32
VideoProducer::_FrameGeneratorThread()
{
	bool forceSendingBuffer = true;
	int32 droppedFrames = 0;
	const int32 kMaxDroppedFrames = 15;
	bool running = true;
	while (running) {
		TRACE("_FrameGeneratorThread: loop: %Ld\n", fFrame);
		// lock the node manager
		status_t err = fManager->LockWithTimeout(10000);
		bool ignoreEvent = false;
		// Data to be retrieved from the node manager.
		bigtime_t performanceTime = 0;
		bigtime_t nextPerformanceTime = 0;
		bigtime_t waitUntil = 0;
		bigtime_t nextWaitUntil = 0;
		int32 playingDirection = 0;
		int32 playingMode = 0;
		int64 playlistFrame = 0;
		switch (err) {
			case B_OK: {
				TRACE("_FrameGeneratorThread: node manager successfully "
					"locked\n");
				if (droppedFrames > 0)
					fManager->FrameDropped();
				// get the times for the current and the next frame
				performanceTime = fManager->TimeForFrame(fFrame);
				nextPerformanceTime = fManager->TimeForFrame(fFrame + 1);
				playingMode = fManager->PlayModeAtFrame(fFrame);
				waitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ performanceTime, fBufferLatency);
				nextWaitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
					+ nextPerformanceTime, fBufferLatency);
				// get playing direction and playlist frame for the current
				// frame
				bool newPlayingState;
				playlistFrame = fManager->PlaylistFrameAtFrame(fFrame,
					playingDirection, newPlayingState);
				TRACE("_FrameGeneratorThread: performance time: %Ld, "
					"playlist frame: %lld\n", performanceTime, playlistFrame);
				forceSendingBuffer |= newPlayingState;
				fManager->SetCurrentVideoTime(nextPerformanceTime);
				fManager->Unlock();
				break;
			}
			case B_TIMED_OUT:
				TRACE("_FrameGeneratorThread: Couldn't lock the node "
					"manager.\n");
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				break;
			default:
				ERROR("_FrameGeneratorThread: Couldn't lock the node manager. "
					"Terminating video producer frame generator thread.\n");
				TRACE("_FrameGeneratorThread: frame generator thread done.\n");
				// do not access any member variables, since this could
				// also mean the Node has been deleted
				return B_OK;
		}

		TRACE("_FrameGeneratorThread: waiting (%Ld)...\n", waitUntil);
		// wait until...
		err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT, waitUntil);
		// The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		// else means the thread should quit. Deleting the semaphore, as in
		// VideoProducer::_HandleStop(), will trigger this behavior.
		switch (err) {
			case B_OK:
				TRACE("_FrameGeneratorThread: going back to sleep.\n");
				break;
			case B_TIMED_OUT:
				TRACE("_FrameGeneratorThread: timed out => event\n");
				// Catch the cases in which the node manager could not be
				// locked and we therefore have no valid data to work with,
				// or the producer is not running or enabled.
				if (ignoreEvent || !fRunning || !fEnabled) {
					TRACE("_FrameGeneratorThread: ignore event\n");
					// nothing to do
				} else if (!forceSendingBuffer
					&& nextWaitUntil < system_time() - fBufferLatency
					&& droppedFrames < kMaxDroppedFrames) {
					// Drop frame if it's at least a frame late.
					if (playingDirection > 0)
						printf("VideoProducer: dropped frame (%Ld)\n", fFrame);
					// next frame
					droppedFrames++;
					fFrame++;
				} else if (playingDirection != 0 || forceSendingBuffer) {
					// Send buffers only, if playing, the node is running and
					// the output has been enabled
					TRACE("_FrameGeneratorThread: produce frame\n");
					BAutolock _(fLock);
					// Fetch a buffer from the buffer group
					fUsedBufferGroup->WaitForBuffers();
					BBuffer* buffer = fUsedBufferGroup->RequestBuffer(
						fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count, 0LL);
					if (buffer == NULL) {
						// Wait until a buffer becomes available again
						ERROR("_FrameGeneratorThread: no buffer!\n");
						break;
					}
					// Fill out the details about this buffer.
					media_header* h = buffer->Header();
					h->type = B_MEDIA_RAW_VIDEO;
					h->time_source = TimeSource()->ID();
					h->size_used = fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count;
					// For a buffer originating from a device, you might
					// want to calculate this based on the
					// PerformanceTimeFor the time your buffer arrived at
					// the hardware (plus any applicable adjustments).
					h->start_time = fPerformanceTimeBase + performanceTime;
					h->file_pos = 0;
					h->orig_size = 0;
					h->data_offset = 0;
					h->u.raw_video.field_gamma = 1.0;
					h->u.raw_video.field_sequence = fFrame;
					h->u.raw_video.field_number = 0;
					h->u.raw_video.pulldown_number = 0;
					h->u.raw_video.first_active_line = 1;
					h->u.raw_video.line_count
						= fConnectedFormat.display.line_count;
					// Fill in a frame
					TRACE("_FrameGeneratorThread: frame: %Ld, "
						"playlistFrame: %Ld\n", fFrame, playlistFrame);
					bool wasCached = false;
					err = fSupplier->FillBuffer(playlistFrame,
						buffer->Data(), fConnectedFormat, forceSendingBuffer,
						wasCached);
					if (err == B_TIMED_OUT) {
						// Don't send the buffer if there was insufficient
						// time for rendering, this will leave the last
						// valid frame on screen until we catch up, instead
						// of going black.
						wasCached = true;
						err = B_OK;
					}
					// clean the buffer if something went wrong
					if (err != B_OK) {
						// TODO: should use "back value" according
						// to color space!
						memset(buffer->Data(), 0, h->size_used);
						err = B_OK;
					}
					// Send the buffer on down to the consumer
					if (wasCached || (err = SendBuffer(buffer, fOutput.source,
							fOutput.destination) != B_OK)) {
						// If there is a problem sending the buffer,
						// or if we don't send the buffer because its
						// contents are the same as the last one,
						// return it to its buffer group.
						buffer->Recycle();
						// we tell the supplier to delete
						// its caches if there was a problem sending
						// the buffer
						if (err != B_OK) {
							ERROR("_FrameGeneratorThread: Error "
								"sending buffer\n");
							fSupplier->DeleteCaches();
						}
					}
					// Only if everything went fine we clear the flag
					// that forces us to send a buffer even if not
					// playing.
					if (err == B_OK)
						forceSendingBuffer = false;
					// next frame
					fFrame++;
					droppedFrames = 0;
				} else {
					TRACE("_FrameGeneratorThread: not playing\n");
					// next frame
					fFrame++;
				}
				break;
			default:
				TRACE("_FrameGeneratorThread: Couldn't acquire semaphore. "
					"Error: %s\n", strerror(err));
				running = false;
				break;
		}
	}
	TRACE("_FrameGeneratorThread: frame generator thread done.\n");
	return B_OK;
}
Example #14
void 
LoggingConsumer::HandleEvent(const media_timed_event *event, bigtime_t /* lateness */, bool /* realTimeEvent */)
{
	log_message logMsg;
	logMsg.now = TimeSource()->Now();
	mLogger->Log(LOG_HANDLE_EVENT, logMsg);

	switch (event->type)
	{
	case BTimedEventQueue::B_HANDLE_BUFFER:
		{
			BBuffer* buffer = const_cast<BBuffer*>((BBuffer*) event->pointer);
			if (buffer)
			{
				media_header* hdr = buffer->Header();
				if (hdr->destination == mInput.destination.id)
				{
					bigtime_t now = TimeSource()->Now();
					bigtime_t perf_time = hdr->start_time;

					// the how_early calculated here doesn't include scheduling latency because
					// we've already been scheduled to handle the buffer
					bigtime_t how_early = perf_time - mLatency - now;

					// logMsg.now is already set
					logMsg.buffer_data.start_time = perf_time;
					logMsg.buffer_data.offset = how_early;
					mLogger->Log(LOG_BUFFER_HANDLED, logMsg);

					// if the buffer is late, we ignore it and report the fact to the producer
					// who sent it to us
					if (how_early < 0)
					{
						mLateBuffers++;
						NotifyLateProducer(mInput.source, -how_early, perf_time);
					}
					else
					{
						// burn some percentage of our stated latency in CPU time (controlled by
						// a BParameter).  this simulates a user-configurable amount of CPU cost
						// associated with the consumer.
						bigtime_t spin_start = ::system_time();
						bigtime_t spin_now = spin_start;
						bigtime_t usecToSpin = bigtime_t(mSpinPercentage / 100.0 * mLatency);
						while (spin_now - spin_start < usecToSpin)
						{
							for (long k = 0; k < 1000000; k++) { /* intentionally blank */ }
							spin_now = ::system_time();
						}
					}

					// we're done "processing the buffer;" now we recycle it and return to the loop
					buffer->Recycle();
				}
				else
				{
					//fprintf(stderr, "* Woah!  Got a buffer for a different destination!\n");
				}
			}
		}
		break;

	// !!! change to B_PARAMETER as soon as it's available
	
	// +++++ e.moon [16jun99]
	// !!! this can't be right: the parameter value is accessed by the pointer
	//     originally passed to SetParameterValue().  there's no guarantee that
	//     value's still valid, is there?
	
	case BTimedEventQueue::B_USER_EVENT:
		{
			size_t dataSize = size_t(event->data);
			int32 param = int32(event->bigdata);
			logMsg.param.id = param;

			// handle the message if there's sufficient data provided.  we only check against
			// sizeof(float) because all of our parameters happen to be 4 bytes.  if various
			// parameters took different amounts of data, we'd check the size on a per-parameter
			// basis.
			if (dataSize >= sizeof(float)) switch (param)
			{
			case LATENCY_PARAM:
				{
					float value = *((float*) event->pointer);
					mLatency = bigtime_t(value* 1000);
					mLastLatencyChange = logMsg.now;

					// my latency just changed, so reconfigure the BMediaEventLooper
					// to give me my events at the proper time
					SetEventLatency(mLatency);

					// tell the producer that my latency changed, and broadcast a message
					// about the parameter change to any applications that may be looking
					// for it through the BMediaRoster::StartWatching() mechanism.
					//
					// if we had more than one input, we'd need to tell *all* producers about
					// the change in our latency.
					SendLatencyChange(mInput.source, mInput.destination, EventLatency() + SchedulingLatency());
					BroadcastNewParameterValue(logMsg.now, param, &value, sizeof(value));

					// log the new latency value, for recordkeeping
					logMsg.param.value = value;
					mLogger->Log(LOG_SET_PARAM_HANDLED, logMsg);
				}
				break;

			case CPU_SPIN_PARAM:
				{
					float value = *((float*) event->pointer);
					mSpinPercentage = value;
					mLastSpinChange = logMsg.now;
					BroadcastNewParameterValue(logMsg.now, param, &value, sizeof(value));
					logMsg.param.value = value;
					mLogger->Log(LOG_SET_PARAM_HANDLED, logMsg);
				}
				break;

			case PRIORITY_PARAM:
				{
					mPriority = *((int32*) event->pointer);
					// DO NOT use ::set_thread_priority() to directly alter the node's control
					// thread priority.  BMediaEventLooper tracks the priority itself and recalculates
					// the node's scheduling latency whenever SetPriority() is called.  This is VERY
					// important for correct functioning of a node chain.  You should *only* alter a
					// BMediaEventLooper's priority by calling its SetPriority() method.
					SetPriority(mPriority);

					mLastPrioChange = logMsg.now;
					BroadcastNewParameterValue(logMsg.now, param, &mPriority, sizeof(mPriority));
					logMsg.param.value = (float) mPriority;
					mLogger->Log(LOG_SET_PARAM_HANDLED, logMsg);
				}
				break;

			// log the fact that we "handled" a "set parameter" event for a
			// nonexistent parameter
			default:
				mLogger->Log(LOG_INVALID_PARAM_HANDLED, logMsg);
				break;
			}
		}
		break;

	case BTimedEventQueue::B_START:
		// okay, let's go!
		mLogger->Log(LOG_START_HANDLED, logMsg);
		break;

	case BTimedEventQueue::B_STOP:
		mLogger->Log(LOG_STOP_HANDLED, logMsg);
		// stopping implies not handling any more buffers.  So, we flush all pending
		// buffers out of the event queue before returning to the event loop.
		EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, BTimedEventQueue::B_HANDLE_BUFFER);
		break;

	case BTimedEventQueue::B_SEEK:
		// seeking the log doesn't make any sense, so we just log that we handled the seek
		// and return without doing anything else
		mLogger->Log(LOG_SEEK_HANDLED, logMsg);
		break;

	case BTimedEventQueue::B_WARP:
		// similarly, time warps aren't meaningful to the logger, so just record it and return
		mLogger->Log(LOG_WARP_HANDLED, logMsg);
		break;

	case BTimedEventQueue::B_DATA_STATUS:
		// we really don't care about the producer's data status, but this is where
		// we'd do something about it if we did.
		logMsg.data_status.status = event->data;
		mLogger->Log(LOG_DATA_STATUS_HANDLED, logMsg);
		break;

	default:
		// hmm, someone enqueued a message that we don't understand.  log and ignore it.
		logMsg.unknown.what = event->type;
		mLogger->Log(LOG_HANDLE_UNKNOWN, logMsg);
		break;
	}
}
Example #15
void
TVideoPreviewView::DisplayThread()
{
	FUNCTION("TVideoPreviewView::DisplayThread\n");

	bigtime_t timeout = 5000;
	bigtime_t realTimeNow = 0;
	bigtime_t perfTimeNow = 0;
	bigtime_t halfPeriod = (bigtime_t) (500000./29.97);
	bool timeSourceRunning = false;

	while (!mDisplayQuit) {
		if (acquire_sem(mServiceLock) == B_NO_ERROR) {
			timeSourceRunning = TimeSource()->IsRunning();
			realTimeNow = BTimeSource::RealTime();
			perfTimeNow = TimeSource()->Now();
			release_sem(mServiceLock);
		}

		snooze(timeout);

		if (timeSourceRunning) {

			// if we received a Stop, deal with it
			if (mStopping) {
				PROGRESS("VidConsumer::DisplayThread - STOP\n");
				if (perfTimeNow >= mStopTime) {
					mRunning = false;
					mStopping = false;

					// deal with any pending Seek
					if (mSeeking)
						mSeeking = false;

					//if (mConnected)
					//	SendDataStatus(B_DATA_NOT_AVAILABLE, mConnections[0], mStopTime);

					continue;
				}
			}

			// if we received a Seek, deal with it
			if (mSeeking) {
				PROGRESS("VidConsumer::DisplayThread - SEEK\n");
				if (perfTimeNow >= mSeekTime) {
					PROGRESS("VidConsumer::DisplayThread - DO SEEK\n");
					mSeeking = false;
					mDeltaTime = mMediaTime;

					continue;
				}
			}

			// if we received a Start, deal with it
			if (mStarting) {
				PROGRESS("BBt848Controllable::CaptureRun mStartTime = %.4f TimeNow = %.4f\n", (double)mStartTime/M1, (double)perfTimeNow/M1);
				if (perfTimeNow >= mStartTime) {
					mRunning = true;
					mStarting = false;
					mDeltaTime = mStartTime;

					//if (mConnected)
					//	SendDataStatus(B_DATA_AVAILABLE, mConnections[0], mStartTime);

					continue;
				}
			}

			if (mRunning) {
				// check for buffer available.
				status_t err = acquire_sem_etc(mBufferAvailable, 1, B_TIMEOUT, halfPeriod * 2);

				if (err == B_TIMED_OUT || !mConnected) {
					ERROR("VidConsumer::DisplayThread - Error from acquire_sem_etc: 0x%lx\n", err);
					continue;
				}

				BBuffer* buffer = mBufferQueue->PopFirstBuffer(0);

				LOOP("Popped buffer %08x, Start time: %.4f, system time: %.4f diff: %.4f\n",
				     buffer,
				     (double) buffer->Header()->start_time/M1,
				     (double) perfTimeNow/M1,
				     (double) (buffer->Header()->start_time - perfTimeNow)/M1);

				// Display frame if we're in B_OFFLINE mode or
				// within +/- a half frame time of start time
				if ( (mRunMode == B_OFFLINE) ||
				     ((perfTimeNow > (buffer->Header()->start_time - halfPeriod)) &&
				      (perfTimeNow < (buffer->Header()->start_time + halfPeriod))) ) {
					uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
					memcpy(m_Bitmap->Bits(), buffer->Data(), mRowBytes * mYSize * bpp);
					buffer->Header()->start_time = system_time();
					buffer->Recycle();
					bigtime_t t1 = system_time();

					//	Update view
					if (LockLooper()) {
						DrawBitmap(m_Bitmap, Bounds());
						UnlockLooper();
					}

					t1 = system_time() - t1;
					if (t1/M1 > .030)
						printf("Draw time = %.4f\n",t1/M1);
					continue;
				} else {
					// If we're too early, push frame back on stack
					if (perfTimeNow < buffer->Header()->start_time) {
						LOOP("push buffer back on stack!\n");
						mBufferQueue->PushBuffer(buffer, buffer->Header()->start_time);
						release_sem(mBufferAvailable);
						continue;
					} else {
						// if we've already passed a half frame time past the buffer start time
						// and RunMode = INCREASE_LATENCY, increase latency and display the frame
						if ( (perfTimeNow > buffer->Header()->start_time) &&
						     (mRunMode == B_INCREASE_LATENCY)) {
							mMyLatency += halfPeriod;
							ERROR("VidConsumer::DisplayThread - Increased latency to: %.4f\n", mMyLatency);
							ERROR("	 Performance time: %.4f @ %.4f\n", (double)buffer->Header()->start_time/M1, (double)perfTimeNow/M1);
							uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
							memcpy(m_Bitmap->Bits(), buffer->Data(), mRowBytes * mYSize * bpp);
							buffer->Recycle();

							// should send late notice
							if (LockLooper()) {
								DrawBitmap(m_Bitmap, Bounds());
								UnlockLooper();
							}

							continue;
						} else {
							// we're more than a half frame time past the buffer start time
							// drop the frame
							ERROR("VidConsumer::DisplayThread - dropped late frame: %.4f @ %.4f\n", (double)buffer->Header()->start_time/M1, (double)perfTimeNow/M1);
							buffer->Recycle();
							// should send late notice
							continue;
						}
					}
				}
			}
			snooze(timeout);
		}  else snooze(timeout); // if TimeSource stopped
	} // while (!mTimeToQuit)
}
Example #16
void
MixerCore::_MixThread()
{
	// The broken BeOS R5 multiaudio node starts with time 0,
	// then publishes negative times for about 50ms, publishes 0
	// again until it finally reaches time values > 0
	if (!LockFromMixThread())
		return;
	bigtime_t start = fTimeSource->Now();
	Unlock();
	while (start <= 0) {
		TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n",
			start);
		snooze(5000);
		if (!LockFromMixThread())
			return;
		start = fTimeSource->Now();
		Unlock();
	}

	if (!LockFromMixThread())
		return;
	bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio)));

	// TODO: when the format changes while running, everything is wrong!
	bigtime_t bufferRequestTimeout = buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio) / 2;

	TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and "
		"downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency,
		fDownstreamLatency, bufferRequestTimeout);

	// We must read from the input buffer at a position (pos) that is always
	// a multiple of fMixBufferFrameCount.
	int64 temp = frames_for_duration(fMixBufferFrameRate, start);
	int64 frameBase = ((temp / fMixBufferFrameCount) + 1)
		* fMixBufferFrameCount;
	bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase);
	Unlock();

	TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, "
		"frameBase %Ld\n", start, timeBase, frameBase);

	ASSERT(fMixBufferFrameCount > 0);

#if DEBUG
	uint64 bufferIndex = 0;
#endif

	typedef RtList<chan_info> chan_info_list;
	chan_info_list inputChanInfos[MAX_CHANNEL_TYPES];
	BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount);
		// TODO: this does not support changing output channel count

	bigtime_t eventTime = timeBase;
	int64 framePos = 0;
	for (;;) {
		if (!LockFromMixThread())
			return;
		bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0)
			- latency - fDownstreamLatency;
		Unlock();
		status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT,
			waitUntil);
		if (rv == B_INTERRUPTED)
			continue;
		if (rv != B_TIMED_OUT && rv < B_OK)
			return;

		if (!LockWithTimeout(10000)) {
			ERROR("MixerCore: LockWithTimeout failed\n");
			continue;
		}

		// no inputs or output muted, skip further processing and just send an
		// empty buffer
		if (fInputs->IsEmpty() || fOutput->IsMuted()) {
			int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			BBuffer* buffer = fBufferGroup->RequestBuffer(size,
				bufferRequestTimeout);
			if (buffer != NULL) {
				memset(buffer->Data(), 0, size);
				// fill in the buffer header
				media_header* hdr = buffer->Header();
				hdr->type = B_MEDIA_RAW_AUDIO;
				hdr->size_used = size;
				hdr->time_source = fTimeSource->ID();
				hdr->start_time = eventTime;
				if (fNode->SendBuffer(buffer, fOutput) != B_OK) {
#if DEBUG
					ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
						bufferIndex);
#else
					ERROR("MixerCore: SendBuffer failed\n");
#endif
					buffer->Recycle();
				}
			} else {
#if DEBUG
				ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: RequestBuffer failed\n");
#endif
			}
			goto schedule_next_event;
		}

		int64 currentFramePos;
		currentFramePos = frameBase + framePos;

		// mix all data from all inputs into the mix buffer
		ASSERT(currentFramePos % fMixBufferFrameCount == 0);

		PRINT(4, "create new buffer event at %Ld, reading input frames at "
			"%Ld\n", eventTime, currentFramePos);

		// Init the channel information for each MixerInput.
		for (int i = 0; MixerInput* input = Input(i); i++) {
			int count = input->GetMixerChannelCount();
			for (int channel = 0; channel < count; channel++) {
				int type;
				const float* base;
				uint32 sampleOffset;
				float gain;
				if (!input->GetMixerChannelInfo(channel, currentFramePos,
						eventTime, &base, &sampleOffset, &type, &gain)) {
					continue;
				}
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				chan_info* info = inputChanInfos[type].Create();
				info->base = (const char*)base;
				info->sample_offset = sampleOffset;
				info->gain = gain;
			}
		}

		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			int sourceCount = fOutput->GetOutputChannelSourceCount(channel);
			for (int i = 0; i < sourceCount; i++) {
				int type;
				float gain;
				fOutput->GetOutputChannelSourceInfoAt(channel, i, &type,
					&gain);
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				int count = inputChanInfos[type].CountItems();
				for (int j = 0; j < count; j++) {
					chan_info* info = inputChanInfos[type].ItemAt(j);
					chan_info* newInfo = mixChanInfos[channel].Create();
					newInfo->base = info->base;
					newInfo->sample_offset = info->sample_offset;
					newInfo->gain = info->gain * gain;
				}
			}
		}

		memset(fMixBuffer, 0,
			fMixBufferChannelCount * fMixBufferFrameCount * sizeof(float));
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			PRINT(5, "_MixThread: channel %d has %d sources\n", channel,
				mixChanInfos[channel].CountItems());

			int count = mixChanInfos[channel].CountItems();
			for (int i = 0; i < count; i++) {
				chan_info* info = mixChanInfos[channel].ItemAt(i);
				PRINT(5, "_MixThread:   base %p, sample-offset %2d, gain %.3f\n",
					info->base, info->sample_offset, info->gain);
				// This looks slightly ugly, but the current GCC will generate
				// the fastest code this way.
				// fMixBufferFrameCount is always > 0.
				uint32 dstSampleOffset
					= fMixBufferChannelCount * sizeof(float);
				uint32 srcSampleOffset = info->sample_offset;
				register char* dst = (char*)&fMixBuffer[channel];
				register char* src = (char*)info->base;
				register float gain = info->gain;
				register int j = fMixBufferFrameCount;
				do {
					*(float*)dst += *(const float*)src * gain;
					dst += dstSampleOffset;
					src += srcSampleOffset;
				 } while (--j);
			}
		}

		// request a buffer
		BBuffer* buffer;
		buffer = fBufferGroup->RequestBuffer(
			fOutput->MediaOutput().format.u.raw_audio.buffer_size,
			bufferRequestTimeout);
		if (buffer != NULL) {
			// copy data from mix buffer into output buffer
			for (int i = 0; i < fMixBufferChannelCount; i++) {
				fResampler[i]->Resample(
					reinterpret_cast<char*>(fMixBuffer) + i * sizeof(float),
					fMixBufferChannelCount * sizeof(float),
					fMixBufferFrameCount,
					reinterpret_cast<char*>(buffer->Data())
						+ (i * bytes_per_sample(
							fOutput->MediaOutput().format.u.raw_audio)),
					bytes_per_frame(fOutput->MediaOutput().format.u.raw_audio),
					frames_per_buffer(
						fOutput->MediaOutput().format.u.raw_audio),
					fOutputGain * fOutput->GetOutputChannelGain(i));
			}
			PRINT(4, "send buffer, inframes %ld, outframes %ld\n",
				fMixBufferFrameCount,
				frames_per_buffer(fOutput->MediaOutput().format.u.raw_audio));

			// fill in the buffer header
			media_header* hdr = buffer->Header();
			hdr->type = B_MEDIA_RAW_AUDIO;
			hdr->size_used
				= fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			hdr->time_source = fTimeSource->ID();
			hdr->start_time = eventTime;

			// swap byte order if necessary
			fOutput->AdjustByteOrder(buffer);

			// send the buffer
			status_t res = fNode->SendBuffer(buffer, fOutput);
			if (res != B_OK) {
#if DEBUG
				ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: SendBuffer failed\n");
#endif
				buffer->Recycle();
			}
		} else {
#if DEBUG
			ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
				bufferIndex);
#else
			ERROR("MixerCore: RequestBuffer failed\n");
#endif
		}

		// make all lists empty
		for (int i = 0; i < MAX_CHANNEL_TYPES; i++)
			inputChanInfos[i].MakeEmpty();
		for (int i = 0; i < fOutput->GetOutputChannelCount(); i++)
			mixChanInfos[i].MakeEmpty();

schedule_next_event:
		// schedule next event
		framePos += fMixBufferFrameCount;
		eventTime = timeBase + bigtime_t((1000000LL * framePos)
			/ fMixBufferFrameRate);
		Unlock();
#if DEBUG
		bufferIndex++;
#endif
	}
}
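
To make the alignment math concrete: with fMixBufferFrameRate == 44100, fMixBufferFrameCount == 2048 and start == 1000000 µs, frames_for_duration() yields 44100 frames, so frameBase = ((44100 / 2048) + 1) * 2048 = 45056, and timeBase = duration_for_frames(44100, 45056) is roughly 1021678 µs: the first mix-buffer boundary after the time source's current time.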
Example #17
/* The following functions form the thread that generates frames. You should
 * replace this with the code that interfaces to your hardware. */
int32
VideoProducer::FrameGenerator()
{
	bigtime_t wait_until = system_time();

	while (1) {
		PRINTF(1, ("FrameGenerator: acquire_sem_etc() until %Ldµs (in %Ldµs)\n", wait_until, wait_until - system_time()));
		status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
				wait_until);

		/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		 * else means the thread should quit. Deleting the semaphore, as in
		 * VideoProducer::HandleStop(), will trigger this behavior. */
		if ((err != B_OK) && (err != B_TIMED_OUT))
			break;

		fFrame++;

		/* Recalculate the time until the thread should wake up to begin
		 * processing the next frame. Subtract fProcessingLatency so that
		 * the frame is sent in time. */
		wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0) +
				(bigtime_t)
						((fFrame - fFrameBase) *
						(1000000 / fConnectedFormat.field_rate)) -
				fProcessingLatency;
PRINT(("PS: %Ld\n", fProcessingLatency));

		/* Drop frame if it's at least a frame late */
		if (wait_until < system_time())
			continue;

		PRINTF(1, ("FrameGenerator: wait until %Ld, %ctimed out, %crunning, %cenabled.\n",
					wait_until,
					(err == B_OK)?'!':' ',
					(fRunning)?' ':'!',
					(fEnabled)?' ':'!'));

		/* If the semaphore was acquired successfully, it means something
		 * changed the timing information (see VideoProducer::Connect()) and
		 * so the thread should go back to sleep until the newly-calculated
		 * wait_until time. */
		if (err == B_OK)
			continue;

		/* Send buffers only if the node is running and the output has been
		 * enabled */
		if (!fRunning || !fEnabled)
			continue;

		BAutolock _(fLock);

		/* Fetch a buffer from the buffer group */
		BBuffer *buffer = fBufferGroup->RequestBuffer(
						4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count, 0LL);
		if (!buffer)
			continue;

		/* Fill out the details about this buffer. */
		media_header *h = buffer->Header();
		h->type = B_MEDIA_RAW_VIDEO;
		h->time_source = TimeSource()->ID();
		h->size_used = 4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count;
		/* For a buffer originating from a device, you might want to calculate
		 * this based on the PerformanceTimeFor the time your buffer arrived at
		 * the hardware (plus any applicable adjustments). */
		/*
		h->start_time = fPerformanceTimeBase +
						(bigtime_t)
							((fFrame - fFrameBase) *
							(1000000 / fConnectedFormat.field_rate));
		*/
		h->file_pos = 0;
		h->orig_size = 0;
		h->data_offset = 0;
		h->u.raw_video.field_gamma = 1.0;
		h->u.raw_video.field_sequence = fFrame;
		h->u.raw_video.field_number = 0;
		h->u.raw_video.pulldown_number = 0;
		h->u.raw_video.first_active_line = 1;
		h->u.raw_video.line_count = fConnectedFormat.display.line_count;

		// This is where we fill the video buffer.

#if 0
		uint32 *p = (uint32 *)buffer->Data();
		/* Fill in a pattern */
		for (uint32 y=0;y<fConnectedFormat.display.line_count;y++)
			for (uint32 x=0;x<fConnectedFormat.display.line_width;x++)
				*(p++) = ((((x+y)^0^x)+fFrame) & 0xff) * (0x01010101 & fColor);
#endif

		//NO! must be called without lock!
		//BAutolock lock(fCamDevice->Locker());

		bigtime_t now = system_time();
		bigtime_t stamp;
//#ifdef UseFillFrameBuffer
		err = fCamDevice->FillFrameBuffer(buffer, &stamp);
		if (err < B_OK) {
			;//XXX handle error
			fStats[0].missed++;
		}
//#endif
#ifdef UseGetFrameBitmap
		BBitmap *bm;
		err = fCamDevice->GetFrameBitmap(&bm, &stamp);
		if (err < B_OK) {
			;//XXX handle error
			fStats[0].missed++;
		}
#endif
		fStats[0].frames = fFrame;
		fStats[0].actual++;
		fStats[0].stamp = system_time();

		//PRINTF(1, ("FrameGenerator: stamp %Ld vs %Ld\n", stamp, h->start_time));
		//XXX: that's what we should be doing, but CodyCam drops all frames as they are late. (maybe add latency ??)
		//h->start_time = TimeSource()->PerformanceTimeFor(stamp);
		h->start_time = TimeSource()->PerformanceTimeFor(system_time());


		// update processing latency
		// XXX: should I ??
		fProcessingLatency = system_time() - now;
		fProcessingLatency /= 10;

		PRINTF(1, ("FrameGenerator: SendBuffer...\n"));
		/* Send the buffer on down to the consumer */
		if (SendBuffer(buffer, fOutput.source, fOutput.destination) < B_OK) {
			PRINTF(-1, ("FrameGenerator: Error sending buffer\n"));
			/* If there is a problem sending the buffer, return it to its
			 * buffer group. */
			buffer->Recycle();
		}

		_UpdateStats();
	}

	PRINTF(1, ("FrameGenerator: thread existed.\n"));
	return B_OK;
}
Example #18
/* The following functions form the thread that generates frames. You should
 * replace this with the code that interfaces to your hardware. */
int32 
FinePixProducer::FrameGenerator()
{
	bigtime_t wait_until = system_time();

	while (1) {
		status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
				wait_until);

		/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		 * else means the thread should quit. Deleting the semaphore, as in
		 * FinePixProducer::HandleStop(), will trigger this behavior. */
		if ((err != B_OK) && (err != B_TIMED_OUT))
			break;

		fFrame++;

		/* Recalculate the time until the thread should wake up to begin
		 * processing the next frame. Subtract fProcessingLatency so that
		 * the frame is sent in time. */
		wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0) +
				(bigtime_t)
						((fFrame - fFrameBase) *
						(1000000 / fConnectedFormat.field_rate)) -
				fProcessingLatency;

		/* Drop frame if it's at least a frame late */
		if (wait_until < system_time())
			continue;

		/* If the semaphore was acquired successfully, it means something
		 * changed the timing information (see FinePixProducer::Connect()) and
		 * so the thread should go back to sleep until the newly-calculated
		 * wait_until time. */
		if (err == B_OK)
			continue;

		/* Send buffers only if the node is running and the output has been
		 * enabled */
		if (!fRunning || !fEnabled)
			continue;

		BAutolock _(fLock);

		// Get the frame from the camera
		fCam->GetPic(fDeltaBuffer, frame_size);
		
		/* Fetch a buffer from the buffer group */
		BBuffer *buffer = fBufferGroup->RequestBuffer(
						4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count, 0LL);
		if (!buffer)
			continue;

		/* Fill out the details about this buffer. */
		media_header *h = buffer->Header();
		h->type = B_MEDIA_RAW_VIDEO;
		h->time_source = TimeSource()->ID();
		h->size_used = 4 * fConnectedFormat.display.line_width *
						fConnectedFormat.display.line_count;
		/* For a buffer originating from a device, you might want to calculate
		 * this based on the PerformanceTimeFor the time your buffer arrived at
		 * the hardware (plus any applicable adjustments). 
		h->start_time = fPerformanceTimeBase +
						(bigtime_t)
							((fFrame - fFrameBase) *
							(1000000 / fConnectedFormat.field_rate));*/
		h->start_time = TimeSource()->Now();
		h->file_pos = 0;
		h->orig_size = 0;
		h->data_offset = 0;
		h->u.raw_video.field_gamma = 1.0;
		h->u.raw_video.field_sequence = fFrame;
		h->u.raw_video.field_number = 0;
		h->u.raw_video.pulldown_number = 0;
		h->u.raw_video.first_active_line = 1;
		h->u.raw_video.line_count = fConnectedFormat.display.line_count;

		// Frame data pointers
		uint8 *tmp24 = (uint8*)tempInBuffer;
		uint8 *dst = (uint8*)buffer->Data();

		// Convert from jpeg to bitmap
		if (jpeg_check_size(fDeltaBuffer,
			    FPIX_RGB24_WIDTH, FPIX_RGB24_HEIGHT)) 
		{
			int n = jpeg_decode(fDeltaBuffer, tmp24,
				FPIX_RGB24_WIDTH, FPIX_RGB24_HEIGHT, 24, //32 not working
				&decdata);
			if (n) 
			{
				PRINTF(-1, ("ooeps decode jpg result : %d", n));
			}
		} else 
		{
			PRINTF(-1, ("ooeps check_size failed"));
		} 
		
		// Convert from 24 bit to 32 bit
		for (uint y=0; y<fConnectedFormat.display.line_count; y++)
			for (uint x=0; x<fConnectedFormat.display.line_width; x++) {
				*(dst++) = *tmp24; //red
				tmp24++;
				*(dst++) = *tmp24; //green
				tmp24++;
				*(dst++) = *tmp24; //blue
				tmp24++;
				dst++; //last 8 bit empty
			}

		/* Send the buffer on down to the consumer */
		if (SendBuffer(buffer, fOutput.destination) < B_OK) {
			PRINTF(-1, ("FrameGenerator: Error sending buffer\n"));
			/* If there is a problem sending the buffer, return it to its
			 * buffer group. */
			buffer->Recycle();
		}
	}

	return B_OK;
}
Example #19
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration());

	if (!buffer) {
		ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
		return NULL;
	}

	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
		// number of samples in the buffer

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->time_source = TimeSource()->ID();
	buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);

	bigtime_t performanceTime = bigtime_t(double(fFramesSent)
		* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));

	// fill in data from audio supplier
	int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
	bigtime_t startTime = performanceTime;
	bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
		* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);

	if (!fSupplier || fSupplier->InitCheck() != B_OK
		|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
			endTime) != B_OK) {
		ERROR("AudioProducer::_FillNextBuffer() - supplier error -> silence\n");
		memset(buffer->Data(), 0, buffer->SizeUsed());
	}

	// stamp buffer
	if (RunMode() == B_RECORDING) {
		header->start_time = eventTime;
	} else {
		header->start_time = fStartTime + performanceTime;
	}

#if DEBUG_TO_FILE
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fOutput.format, &track)) {
		track->WriteFrames(buffer->Data(), frameCount);
	}
#endif // DEBUG_TO_FILE

	if (fPeakListener
		&& fOutput.format.u.raw_audio.format
			== media_raw_audio_format::B_AUDIO_FLOAT) {
		// TODO: extend the peak notifier for other sample formats
		int32 channels = fOutput.format.u.raw_audio.channel_count;
		float max[channels];
		float min[channels];
		for (int32 i = 0; i < channels; i++) {
			max[i] = -1.0;
			min[i] = 1.0;
		}
		float* sample = (float*)buffer->Data();
		for (uint32 i = 0; i < frameCount; i++) {
			for (int32 k = 0; k < channels; k++) {
				if (*sample < min[k])
					min[k] = *sample;
				if (*sample > max[k])
					max[k] = *sample;
				sample++;
			}
		}
		BMessage message(MSG_PEAK_NOTIFICATION);
		for (int32 i = 0; i < channels; i++) {
			float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
			message.AddFloat("max", maxAbs);
		}
		bigtime_t realTime = TimeSource()->RealTimeFor(
			fStartTime + performanceTime, 0);
		MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
			fPeakListener, message);
		if (event != NULL)
			EventQueue::Default().AddEvent(event);
	}

	return buffer;
}
Example #20
status_t
BBufferConsumer::HandleMessage(int32 message, const void* data, size_t size)
{
	PRINT(4, "BBufferConsumer::HandleMessage %#lx, node %ld\n", message, ID());
	status_t rv;
	switch (message) {
		case CONSUMER_ACCEPT_FORMAT:
		{
			const consumer_accept_format_request* request
				= static_cast<const consumer_accept_format_request*>(data);

			consumer_accept_format_reply reply;
			reply.format = request->format;
			status_t status = AcceptFormat(request->dest, &reply.format);
			request->SendReply(status, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_GET_NEXT_INPUT:
		{
			const consumer_get_next_input_request *request = static_cast<const consumer_get_next_input_request *>(data);
			consumer_get_next_input_reply reply;
			reply.cookie = request->cookie;
			rv = GetNextInput(&reply.cookie, &reply.input);
			request->SendReply(rv, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_DISPOSE_INPUT_COOKIE:
		{
			const consumer_dispose_input_cookie_request *request = static_cast<const consumer_dispose_input_cookie_request *>(data);
			consumer_dispose_input_cookie_reply reply;
			DisposeInputCookie(request->cookie);
			request->SendReply(B_OK, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_BUFFER_RECEIVED:
		{
			const consumer_buffer_received_command* command
				= static_cast<const consumer_buffer_received_command*>(data);

			BBuffer* buffer = fBufferCache->GetBuffer(command->buffer);
			if (buffer == NULL) {
				ERROR("BBufferConsumer::CONSUMER_BUFFER_RECEIVED can't"
					"find the buffer\n");
			} else {
				buffer->SetHeader(&command->header);

				PRINT(4, "calling BBufferConsumer::BufferReceived buffer %ld "
					"at perf %Ld and TimeSource()->Now() is %Ld\n",
					buffer->Header()->buffer, buffer->Header()->start_time,
					TimeSource()->Now());

				BufferReceived(buffer);
			}
			return B_OK;
		}

		case CONSUMER_PRODUCER_DATA_STATUS:
		{
			const consumer_producer_data_status_command *command = static_cast<const consumer_producer_data_status_command *>(data);
			ProducerDataStatus(command->for_whom, command->status, command->at_performance_time);
			return B_OK;
		}

		case CONSUMER_GET_LATENCY_FOR:
		{
			const consumer_get_latency_for_request *request = static_cast<const consumer_get_latency_for_request *>(data);
			consumer_get_latency_for_reply reply;
			rv = GetLatencyFor(request->for_whom, &reply.latency, &reply.timesource);
			request->SendReply(rv, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_CONNECTED:
		{
			const consumer_connected_request *request = static_cast<const consumer_connected_request *>(data);
			consumer_connected_reply reply;
			reply.input = request->input;
			rv = Connected(request->input.source, request->input.destination, request->input.format, &reply.input);
			request->SendReply(rv, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_DISCONNECTED:
		{
			const consumer_disconnected_request *request = static_cast<const consumer_disconnected_request *>(data);
			consumer_disconnected_reply reply;
			Disconnected(request->source, request->destination);
			request->SendReply(B_OK, &reply, sizeof(reply));
			return B_OK;
		}

		case CONSUMER_FORMAT_CHANGED:
		{
			const consumer_format_changed_request *request = static_cast<const consumer_format_changed_request *>(data);
			consumer_format_changed_reply reply;
			rv = FormatChanged(request->producer, request->consumer, request->change_tag, request->format);
			request->SendReply(rv, &reply, sizeof(reply));

			// XXX is this RequestCompleted() correct?
			node_request_completed_command completedcommand;
			completedcommand.info.what = media_request_info::B_FORMAT_CHANGED;
			completedcommand.info.change_tag = request->change_tag;
			completedcommand.info.status = reply.result;
			//completedcommand.info.cookie
			completedcommand.info.user_data = 0;
			completedcommand.info.source = request->producer;
			completedcommand.info.destination = request->consumer;
			completedcommand.info.format = request->format;
			SendToPort(request->consumer.port, NODE_REQUEST_COMPLETED, &completedcommand, sizeof(completedcommand));
			return B_OK;
		}

		case CONSUMER_SEEK_TAG_REQUESTED:
		{
			const consumer_seek_tag_requested_request *request = static_cast<const consumer_seek_tag_requested_request *>(data);
			consumer_seek_tag_requested_reply reply;
			rv = SeekTagRequested(request->destination, request->target_time, request->flags, &reply.seek_tag, &reply.tagged_time, &reply.flags);
			request->SendReply(rv, &reply, sizeof(reply));
			return B_OK;
		}
	}
	return B_ERROR;
}
Example #21
// _FrameGenerator
int32 
VideoProducer::_FrameGenerator()
{
	bool forceSendingBuffer = true;
	bigtime_t lastFrameSentAt = 0;
	bool running = true;
	while (running) {
ldebug("VideoProducer: loop: %Ld\n", fFrame);
		// lock the node manager
		status_t err = fManager->LockWithTimeout(10000);
		bool ignoreEvent = false;
		// Data to be retrieved from the node manager.
		bigtime_t performanceTime = 0;
		bigtime_t nextPerformanceTime = 0;
		bigtime_t waitUntil = 0;
		bigtime_t nextWaitUntil = 0;
		bigtime_t maxRenderTime = 0;
		int32 playingDirection = 0;
		int64 playlistFrame = 0;
		switch (err) {
			case B_OK: {
ldebug("VideoProducer: node manager successfully locked\n");
				// get the times for the current and the next frame
				performanceTime = fManager->TimeForFrame(fFrame);
				nextPerformanceTime = fManager->TimeForFrame(fFrame + 1);
				maxRenderTime = min_c(bigtime_t(33334 * 0.9),
									  max_c(fSupplier->ProcessingLatency(), maxRenderTime));

				waitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
													+ performanceTime, 0) - maxRenderTime;
				nextWaitUntil = TimeSource()->RealTimeFor(fPerformanceTimeBase
													+ nextPerformanceTime, 0) - maxRenderTime;
				// get playing direction and playlist frame for the current frame
				bool newPlayingState;
				playlistFrame = fManager->PlaylistFrameAtFrame(fFrame,
															   playingDirection,
															   newPlayingState);
ldebug("VideoProducer: performance time: %Ld, playlist frame: %Ld\n",
performanceTime, playlistFrame);
				forceSendingBuffer |= newPlayingState;
				fManager->SetCurrentVideoTime(nextPerformanceTime);
				fManager->Unlock();
				break;
			}
			case B_TIMED_OUT:
ldebug("VideoProducer: Couldn't lock the node manager.\n");
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				break;
			default:
				printf("Couldn't lock the node manager. Terminating video producer "
					   "frame generator thread.\n");
				ignoreEvent = true;
				waitUntil = system_time() - 1;
				running = false;
				break;
		}
		// Force sending a frame, if the last one has been sent more than
		// one second ago.
		if (lastFrameSentAt + 1000000 < performanceTime)
			forceSendingBuffer = true;

ldebug("VideoProducer: waiting (%Ld)...\n", waitUntil);
		// wait until...
		err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT, waitUntil);
		// The only acceptable responses are B_OK and B_TIMED_OUT. Everything
		// else means the thread should quit. Deleting the semaphore, as in
		// VideoProducer::_HandleStop(), will trigger this behavior.
		switch (err) {
			case B_OK:
ldebug("VideoProducer::_FrameGenerator - going back to sleep.\n");
				break;
			case B_TIMED_OUT:
ldebug("VideoProducer: timed out => event\n");
				// Catch the cases in which the node manager could not be
				// locked and we therefore have no valid data to work with,
				// or the producer is not running or enabled.
				if (ignoreEvent || !fRunning || !fEnabled) {
ldebug("VideoProducer: ignore event\n");
					// nothing to do
				// Drop frame if it's at least a frame late.
				} else if (nextWaitUntil < system_time()) {
//printf("VideoProducer: dropped frame (%ld)\n", fFrame);
					if (fManager->LockWithTimeout(10000) == B_OK) {
						fManager->FrameDropped();
						fManager->Unlock();
					}
					// next frame
					fFrame++;
				// Produce a buffer only if we are playing or a send is
				// being forced; running/enabled were already checked above.
				} else if (playingDirection != 0 || forceSendingBuffer) {
ldebug("VideoProducer: produce frame\n");
					BAutolock _(fLock);
					// Fetch a buffer from the buffer group
					BBuffer *buffer = fUsedBufferGroup->RequestBuffer(
						fConnectedFormat.display.bytes_per_row
						* fConnectedFormat.display.line_count, 0LL);
					if (buffer) {
						// Fill out the details about this buffer.
						media_header *h = buffer->Header();
						h->type = B_MEDIA_RAW_VIDEO;
						h->time_source = TimeSource()->ID();
						h->size_used = fConnectedFormat.display.bytes_per_row
									   * fConnectedFormat.display.line_count;
						// For a buffer originating from a device, you might
						// want to calculate this via PerformanceTimeFor()
						// from the real time at which the buffer arrived at
						// the hardware (plus any applicable adjustments).
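						// A hedged sketch for that device case; the name
						// captureRealTime and member fProcessingLatency are
						// hypothetical:
						//   h->start_time = TimeSource()->PerformanceTimeFor(
						//       captureRealTime) + fProcessingLatency;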
						h->start_time = fPerformanceTimeBase + performanceTime;
						h->file_pos = 0;
						h->orig_size = 0;
						h->data_offset = 0;
						h->u.raw_video.field_gamma = 1.0;
						h->u.raw_video.field_sequence = fFrame;
						h->u.raw_video.field_number = 0;
						h->u.raw_video.pulldown_number = 0;
						h->u.raw_video.first_active_line = 1;
						h->u.raw_video.line_count
							= fConnectedFormat.display.line_count;
						// Fill in a frame
						media_format mf;
						mf.type = B_MEDIA_RAW_VIDEO;
						mf.u.raw_video = fConnectedFormat;
ldebug("VideoProducer: frame: %Ld, playlistFrame: %Ld\n", fFrame, playlistFrame);
						bool forceOrWasCached = forceSendingBuffer;
	
//						if (fManager->LockWithTimeout(5000) == B_OK) {
							// we need to lock the manager, or our
							// fSupplier might work on bad data
							err = fSupplier->FillBuffer(playlistFrame,
														buffer->Data(), &mf,
														forceOrWasCached);
//							fManager->Unlock();
//						} else {
//							err = B_ERROR;
//						}
						// clean the buffer if something went wrong
						if (err != B_OK) {
							memset(buffer->Data(), 0, h->size_used);
							err = B_OK;
						}
						// Send the buffer on down to the consumer. If there
						// is a problem sending it, or if we don't send it
						// because its contents are the same as the last
						// one's, return it to its buffer group.
						if (!forceOrWasCached) {
							if (SendBuffer(buffer, fOutput.source,
									fOutput.destination) != B_OK) {
								printf("_FrameGenerator: Error sending buffer\n");
								buffer->Recycle();
								// Tell the supplier to delete its caches if
								// there was a problem sending the buffer.
								fSupplier->DeleteCaches();
							}
						} else
							buffer->Recycle();
						// Only if everything went fine do we clear the flag
						// that forces us to send a buffer even if not
						// playing.
						if (err == B_OK) {
							forceSendingBuffer = false;
							lastFrameSentAt = performanceTime;
						}
					}
else ldebug("no buffer!\n");
					// next frame
					fFrame++;
				} else {
ldebug("VideoProducer: not playing\n");
					// next frame
					fFrame++;
				}
				break;
			default:
ldebug("Couldn't acquire semaphore. Error: %s\n", strerror(err));
				running = false;
				break;
		}
	}
ldebug("VideoProducer: frame generator thread done.\n");
	return B_OK;
}
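The comment before acquire_sem_etc() relies on _HandleStop() deleting the
fFrameSync semaphore to end this loop; that function is not included here.
A minimal sketch of such a stop path, assuming fThread holds the id of the
spawned generator thread, might look like this:

// Hedged sketch of the stop mechanism referenced in _FrameGenerator().
void
VideoProducer::_HandleStop()
{
	// Deleting the semaphore makes acquire_sem_etc() in the generator
	// loop return something other than B_OK or B_TIMED_OUT, which
	// terminates the thread.
	delete_sem(fFrameSync);

	status_t result;
	wait_for_thread(fThread, &result);
}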
Example #22
void
ConsumerNode::HandleEvent(const media_timed_event *event,
                          bigtime_t lateness,
                          bool realTimeEvent)
{
    switch (event->type)
    {
    case BTimedEventQueue::B_HANDLE_BUFFER:
    {
        out("ConsumerNode::HandleEvent B_HANDLE_BUFFER\n");
        BBuffer* buffer = static_cast<BBuffer*>(event->pointer);
        if (buffer != NULL) {
            out("### scheduled time = %5.4f, current time = %5.4f, lateness = %5.4f\n",
                buffer->Header()->start_time / 1E6,
                TimeSource()->Now() / 1E6, lateness / 1E6);

            // simulate a random processing time of up to 200 ms
            snooze(rand() % 200000);

            buffer->Recycle();
        }
    }
    break;

    case BTimedEventQueue::B_PARAMETER:
    {
        out("ConsumerNode::HandleEvent B_PARAMETER\n");
    }
    break;

    case BTimedEventQueue::B_START:
    {
        out("ConsumerNode::HandleEvent B_START\n");
    }
    break;

    case BTimedEventQueue::B_STOP:
    {
        out("ConsumerNode::HandleEvent B_STOP\n");
        // Stopping implies not handling any more buffers, so we flush all
        // pending buffers out of the event queue before returning to the
        // event loop.
        EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
                                  BTimedEventQueue::B_HANDLE_BUFFER);
    }
    break;

    case BTimedEventQueue::B_SEEK:
    {
        out("ConsumerNode::HandleEvent B_SEEK\n");
    }
    break;

    case BTimedEventQueue::B_WARP:
    {
        out("ConsumerNode::HandleEvent B_WARP\n");
        // Time warps aren't meaningful to this consumer, so just record
        // the event and return.
    }
    break;

    case BTimedEventQueue::B_DATA_STATUS:
    {
        out("ConsumerNode::HandleEvent B_DATA_STATUS\n");
    }
    break;

    default:
    {
        out("ConsumerNode::HandleEvent default\n");
    }
    break;
    }
}
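HandleEvent() only consumes events that were queued earlier; in this node
pattern the B_HANDLE_BUFFER entries are normally pushed from the node's
BufferReceived() hook, which is not shown above. A minimal sketch of that
side of the queue, under the assumption that arriving buffers are simply
scheduled at their start_time:

// Hedged sketch: how B_HANDLE_BUFFER events typically reach the queue.
void
ConsumerNode::BufferReceived(BBuffer* buffer)
{
    // Schedule the buffer for its start_time; the cleanup flag makes
    // FlushEvents() recycle it if the event is thrown away.
    media_timed_event event(buffer->Header()->start_time,
                            BTimedEventQueue::B_HANDLE_BUFFER, buffer,
                            BTimedEventQueue::B_RECYCLE_BUFFER);
    EventQueue()->AddEvent(event);
}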