Example #1
void FlangerNode::Connect(
	status_t status,
	const media_source& source,
	const media_destination& destination,
	const media_format& format,
	char* pioName) {

	PRINT(("FlangerNode::Connect()\n"));
	status_t err;

	// connection failed?
	if(status < B_OK) {
		PRINT(("\tStatus: %s\n", strerror(status)));
		// 'unreserve' the output
		m_output.destination = media_destination::null;
		return;
	}

	// connection established:
	strncpy(pioName, m_output.name, B_MEDIA_NAME_LENGTH);
	m_output.destination = destination;
	m_format = format;

	// figure downstream latency
	media_node_id timeSource;
	err = FindLatencyFor(m_output.destination, &m_downstreamLatency, &timeSource);
	if(err < B_OK) {
		PRINT(("\t!!! FindLatencyFor(): %s\n", strerror(err)));
	}
	PRINT(("\tdownstream latency = %Ld\n", m_downstreamLatency));

	// prepare the filter
	initFilter();

	// figure processing time
	m_processingLatency = calcProcessingLatency();
	PRINT(("\tprocessing latency = %Ld\n", m_processingLatency));

	// store summed latency
	SetEventLatency(m_downstreamLatency + m_processingLatency);

	if(m_input.source != media_source::null) {
		// pass new latency upstream
		err = SendLatencyChange(
			m_input.source,
			m_input.destination,
			EventLatency() + SchedulingLatency());
		if(err < B_OK)
			PRINT(("\t!!! SendLatencyChange(): %s\n", strerror(err)));
	}

	// cache buffer duration
	SetBufferDuration(
		buffer_duration(
			m_format.u.raw_audio));
}
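All three examples cache the length of one outgoing buffer with buffer_duration(). Below is a minimal sketch of the equivalent arithmetic, not the Media Kit implementation; it assumes the standard media_raw_audio_format fields and that the low nibble of the format constant encodes the sample size in bytes (as the B_AUDIO_* constants do).

#include <MediaDefs.h>

// Sketch only: what buffer_duration(format.u.raw_audio) boils down to
// for raw audio, under the assumptions stated above.
static bigtime_t
approximate_buffer_duration(const media_raw_audio_format& f)
{
	size_t bytesPerSample = f.format & 0xf;
	size_t bytesPerFrame = bytesPerSample * f.channel_count;
	if (bytesPerFrame == 0 || f.frame_rate <= 0.0)
		return 0;
	int64 framesPerBuffer = f.buffer_size / bytesPerFrame;
	// one buffer covers framesPerBuffer / frame_rate seconds; scale to microseconds
	return bigtime_t((1000000.0 * framesPerBuffer) / f.frame_rate);
}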
Example #2
void AudioFilterNode::Connect(
	status_t status,
	const media_source& source,
	const media_destination& destination,
	const media_format& format,
	char* ioName) {
	
	PRINT(("AudioFilterNode::Connect()\n"));
	status_t err;

#if DEBUG
	char formatStr[256];
	string_for_format(format, formatStr, 255);
	PRINT(("\tformat: %s\n", formatStr));
#endif

	// connection failed?	
	if(status < B_OK) {
		PRINT(("\tCONNECTION FAILED: Status '%s'\n", strerror(status)));
		// 'unreserve' the output
		m_output.destination = media_destination::null;
		return;
	}
	
	// connection established:
	strncpy(ioName, m_output.name, B_MEDIA_NAME_LENGTH);
	m_output.destination = destination;
	
	// figure downstream latency
	media_node_id timeSource;
	err = FindLatencyFor(m_output.destination, &m_downstreamLatency, &timeSource);
	if(err < B_OK) {
		PRINT(("\t!!! FindLatencyFor(): %s\n", strerror(err)));
	}
	PRINT(("\tdownstream latency = %Ld\n", m_downstreamLatency));
	
//	// prepare the filter
//	initFilter();
//
//	// figure processing time
//	m_processingLatency = calcProcessingLatency();
//	PRINT(("\tprocessing latency = %Ld\n", m_processingLatency));
//	
//	// store summed latency
//	SetEventLatency(m_downstreamLatency + m_processingLatency);
//	
//	if(m_input.source != media_source::null) {
//		// pass new latency upstream
//		err = SendLatencyChange(
//			m_input.source,
//			m_input.destination,
//			EventLatency() + SchedulingLatency());
//		if(err < B_OK)
//			PRINT(("\t!!! SendLatencyChange(): %s\n", strerror(err)));
//	}

	// cache buffer duration
	SetBufferDuration(
		buffer_duration(
			m_output.format.u.raw_audio));
			
	// [re-]initialize operation
	updateOperation();	

	// initialize buffer group if necessary
	updateBufferGroup();
}
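Example #1 estimates its processing latency with a calcProcessingLatency() helper before summing it with the downstream latency; the same step is left commented out in Example #2. The following is a hedged sketch of how such a measurement might look: the filterOnePass callback and the scratch-buffer approach are assumptions for illustration, not code from either node.

#include <OS.h>			// system_time(), bigtime_t
#include <cstring>		// memset()

// Hypothetical helper: time one buffer's worth of processing to estimate
// a node's processing latency. filterOnePass stands in for the node's
// real per-buffer filter routine.
static bigtime_t
measure_processing_latency(size_t bufferSize,
	void (*filterOnePass)(void* data, size_t size))
{
	char* scratch = new char[bufferSize];
	memset(scratch, 0, bufferSize);

	bigtime_t start = system_time();
	filterOnePass(scratch, bufferSize);
	bigtime_t elapsed = system_time() - start;

	delete[] scratch;
	return elapsed;
}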
Example #3
void
MixerCore::_MixThread()
{
	// The broken BeOS R5 multiaudio node starts with time 0,
	// then publishes negative times for about 50ms, publishes 0
	// again until it finally reaches time values > 0
	if (!LockFromMixThread())
		return;
	bigtime_t start = fTimeSource->Now();
	Unlock();
	while (start <= 0) {
		TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n",
			start);
		snooze(5000);
		if (!LockFromMixThread())
			return;
		start = fTimeSource->Now();
		Unlock();
	}

	if (!LockFromMixThread())
		return;
	bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio)));

	// TODO: when the format changes while running, everything is wrong!
	bigtime_t bufferRequestTimeout = buffer_duration(
		fOutput->MediaOutput().format.u.raw_audio) / 2;

	TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and "
		"downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency,
		fDownstreamLatency, bufferRequestTimeout);

	// We must read from the input buffer at a position (pos) that is always
	// a multiple of fMixBufferFrameCount.
	int64 temp = frames_for_duration(fMixBufferFrameRate, start);
	int64 frameBase = ((temp / fMixBufferFrameCount) + 1)
		* fMixBufferFrameCount;
	bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase);
	Unlock();

	TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, "
		"frameBase %Ld\n", start, timeBase, frameBase);

	ASSERT(fMixBufferFrameCount > 0);

#if DEBUG
	uint64 bufferIndex = 0;
#endif

	typedef RtList<chan_info> chan_info_list;
	chan_info_list inputChanInfos[MAX_CHANNEL_TYPES];
	BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount);
		// TODO: this does not support changing output channel count

	bigtime_t eventTime = timeBase;
	int64 framePos = 0;
	for (;;) {
		if (!LockFromMixThread())
			return;
		bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0)
			- latency - fDownstreamLatency;
		Unlock();
		status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT,
			waitUntil);
		if (rv == B_INTERRUPTED)
			continue;
		if (rv != B_TIMED_OUT && rv < B_OK)
			return;

		if (!LockWithTimeout(10000)) {
			ERROR("MixerCore: LockWithTimeout failed\n");
			continue;
		}

		// no inputs or output muted, skip further processing and just send an
		// empty buffer
		if (fInputs->IsEmpty() || fOutput->IsMuted()) {
			int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			BBuffer* buffer = fBufferGroup->RequestBuffer(size,
				bufferRequestTimeout);
			if (buffer != NULL) {
				memset(buffer->Data(), 0, size);
				// fill in the buffer header
				media_header* hdr = buffer->Header();
				hdr->type = B_MEDIA_RAW_AUDIO;
				hdr->size_used = size;
				hdr->time_source = fTimeSource->ID();
				hdr->start_time = eventTime;
				if (fNode->SendBuffer(buffer, fOutput) != B_OK) {
#if DEBUG
					ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
						bufferIndex);
#else
					ERROR("MixerCore: SendBuffer failed\n");
#endif
					buffer->Recycle();
				}
			} else {
#if DEBUG
				ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: RequestBuffer failed\n");
#endif
			}
			goto schedule_next_event;
		}

		int64 currentFramePos;
		currentFramePos = frameBase + framePos;

		// mix all data from all inputs into the mix buffer
		ASSERT(currentFramePos % fMixBufferFrameCount == 0);

		PRINT(4, "create new buffer event at %Ld, reading input frames at "
			"%Ld\n", eventTime, currentFramePos);

		// Init the channel information for each MixerInput.
		for (int i = 0; MixerInput* input = Input(i); i++) {
			int count = input->GetMixerChannelCount();
			for (int channel = 0; channel < count; channel++) {
				int type;
				const float* base;
				uint32 sampleOffset;
				float gain;
				if (!input->GetMixerChannelInfo(channel, currentFramePos,
						eventTime, &base, &sampleOffset, &type, &gain)) {
					continue;
				}
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				chan_info* info = inputChanInfos[type].Create();
				info->base = (const char*)base;
				info->sample_offset = sampleOffset;
				info->gain = gain;
			}
		}

		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			int sourceCount = fOutput->GetOutputChannelSourceCount(channel);
			for (int i = 0; i < sourceCount; i++) {
				int type;
				float gain;
				fOutput->GetOutputChannelSourceInfoAt(channel, i, &type,
					&gain);
				if (type < 0 || type >= MAX_CHANNEL_TYPES)
					continue;
				int count = inputChanInfos[type].CountItems();
				for (int j = 0; j < count; j++) {
					chan_info* info = inputChanInfos[type].ItemAt(j);
					chan_info* newInfo = mixChanInfos[channel].Create();
					newInfo->base = info->base;
					newInfo->sample_offset = info->sample_offset;
					newInfo->gain = info->gain * gain;
				}
			}
		}

		memset(fMixBuffer, 0,
			fMixBufferChannelCount * fMixBufferFrameCount * sizeof(float));
		for (int channel = 0; channel < fMixBufferChannelCount; channel++) {
			PRINT(5, "_MixThread: channel %d has %d sources\n", channel,
				mixChanInfos[channel].CountItems());

			int count = mixChanInfos[channel].CountItems();
			for (int i = 0; i < count; i++) {
				chan_info* info = mixChanInfos[channel].ItemAt(i);
				PRINT(5, "_MixThread:   base %p, sample-offset %2d, gain %.3f\n",
					info->base, info->sample_offset, info->gain);
				// This looks slightly ugly, but the current GCC will generate
				// the fastest code this way.
				// fMixBufferFrameCount is always > 0.
				uint32 dstSampleOffset
					= fMixBufferChannelCount * sizeof(float);
				uint32 srcSampleOffset = info->sample_offset;
				register char* dst = (char*)&fMixBuffer[channel];
				register char* src = (char*)info->base;
				register float gain = info->gain;
				register int j = fMixBufferFrameCount;
				do {
					*(float*)dst += *(const float*)src * gain;
					dst += dstSampleOffset;
					src += srcSampleOffset;
				} while (--j);
			}
		}

		// request a buffer
		BBuffer* buffer;
		buffer = fBufferGroup->RequestBuffer(
			fOutput->MediaOutput().format.u.raw_audio.buffer_size,
			bufferRequestTimeout);
		if (buffer != NULL) {
			// copy data from mix buffer into output buffer
			for (int i = 0; i < fMixBufferChannelCount; i++) {
				fResampler[i]->Resample(
					reinterpret_cast<char*>(fMixBuffer) + i * sizeof(float),
					fMixBufferChannelCount * sizeof(float),
					fMixBufferFrameCount,
					reinterpret_cast<char*>(buffer->Data())
						+ (i * bytes_per_sample(
							fOutput->MediaOutput().format.u.raw_audio)),
					bytes_per_frame(fOutput->MediaOutput().format.u.raw_audio),
					frames_per_buffer(
						fOutput->MediaOutput().format.u.raw_audio),
					fOutputGain * fOutput->GetOutputChannelGain(i));
			}
			PRINT(4, "send buffer, inframes %ld, outframes %ld\n",
				fMixBufferFrameCount,
				frames_per_buffer(fOutput->MediaOutput().format.u.raw_audio));

			// fill in the buffer header
			media_header* hdr = buffer->Header();
			hdr->type = B_MEDIA_RAW_AUDIO;
			hdr->size_used
				= fOutput->MediaOutput().format.u.raw_audio.buffer_size;
			hdr->time_source = fTimeSource->ID();
			hdr->start_time = eventTime;

			// swap byte order if necessary
			fOutput->AdjustByteOrder(buffer);

			// send the buffer
			status_t res = fNode->SendBuffer(buffer, fOutput);
			if (res != B_OK) {
#if DEBUG
				ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
					bufferIndex);
#else
				ERROR("MixerCore: SendBuffer failed\n");
#endif
				buffer->Recycle();
			}
		} else {
#if DEBUG
			ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n",
				bufferIndex);
#else
			ERROR("MixerCore: RequestBuffer failed\n");
#endif
		}

		// make all lists empty
		for (int i = 0; i < MAX_CHANNEL_TYPES; i++)
			inputChanInfos[i].MakeEmpty();
		for (int i = 0; i < fOutput->GetOutputChannelCount(); i++)
			mixChanInfos[i].MakeEmpty();

schedule_next_event:
		// schedule next event
		framePos += fMixBufferFrameCount;
		eventTime = timeBase + bigtime_t((1000000LL * framePos)
			/ fMixBufferFrameRate);
		Unlock();
#if DEBUG
		bufferIndex++;
#endif
	}
}
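The loop above relies on input reads always starting at a multiple of fMixBufferFrameCount; the frameBase/timeBase computation near the top rounds the start time up to the next such boundary. Below is a standalone sketch of that alignment, using plain arithmetic stand-ins for the Media Kit's frames_for_duration()/duration_for_frames() helpers; the variable names are assumptions.

#include <SupportDefs.h>	// int64, bigtime_t

// Hedged sketch of the alignment done in _MixThread(): round 'start' up to
// the next whole mix-buffer boundary and report both the frame index and
// the performance time of the first buffer event.
static void
align_to_mix_buffer(bigtime_t start, int64 frameRate, int64 framesPerMix,
	int64* frameBase, bigtime_t* timeBase)
{
	// frames elapsed at 'start' (~ frames_for_duration(frameRate, start))
	int64 framesAtStart = int64((double)frameRate * start / 1000000.0);
	// round up to the next multiple of framesPerMix
	*frameBase = ((framesAtStart / framesPerMix) + 1) * framesPerMix;
	// convert back to a time (~ duration_for_frames(frameRate, *frameBase));
	// later events then land at *timeBase + (1000000 * framePos) / frameRate
	*timeBase = bigtime_t(1000000.0 * (double)*frameBase / (double)frameRate);
}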