예제 #1
0
	void IAudioMixerPlatformInterface::ReadNextBuffer()
	{
		if (AudioStreamInfo.StreamState != EAudioOutputStreamState::Stopping)
		{
			// Make sure we have a pending task in the read index
			check(CallbackTask);

			// Make sure it's done
			CallbackTask->EnsureCompletion();

			// Get the processed buffer
			const TArray<float>* Buffer = CallbackTask->GetTask().GetBuffer();

			// Submit the buffer to the platform output device
			check(Buffer);
			SubmitBuffer(*Buffer);

			// Clean up the task
			delete CallbackTask;

			// Create a new task and start it
			CallbackTask = new FAsyncAudioMixerCallbackTask(this, &OutputBuffers[CurrentBufferIndex]);
			CallbackTask->StartBackgroundTask();

			// Increment the buffer index
			CurrentBufferIndex = (CurrentBufferIndex + 1) % NumMixerBuffers;
		}
	}
예제 #2
0
/*
========================
idSoundVoice_OpenAL::RestartAt
========================
*/
int idSoundVoice_OpenAL::RestartAt( int offsetSamples )
{
	offsetSamples &= ~127;
	
	idSoundSample_OpenAL* sample = leadinSample;
	if( offsetSamples >= leadinSample->playLength )
	{
		if( loopingSample != NULL )
		{
			offsetSamples %= loopingSample->playLength;
			sample = loopingSample;
		}
		else
		{
			return 0;
		}
	}
	
	int previousNumSamples = 0;
	for( int i = 0; i < sample->buffers.Num(); i++ )
	{
		if( sample->buffers[i].numSamples > sample->playBegin + offsetSamples )
		{
			return SubmitBuffer( sample, i, sample->playBegin + offsetSamples - previousNumSamples );
		}
		previousNumSamples = sample->buffers[i].numSamples;
	}
	
	return 0;
}
예제 #3
0
StreamingVoiceContext2_7::StreamingVoiceContext2_7(IXAudio2 *pXAudio2, CMixer *pMixer, Common::Event& pSyncEvent)
	: m_mixer(pMixer)
	, m_sound_sync_event(pSyncEvent)
	, xaudio_buffer(new BYTE[NUM_BUFFERS * BUFFER_SIZE_BYTES]())
{
	// Describe the stream to XAudio2: 16-bit stereo PCM at the mixer's rate.
	WAVEFORMATEXTENSIBLE wfx = {};
	wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
	wfx.Format.nChannels = 2;
	wfx.Format.wBitsPerSample = 16;
	wfx.Format.nSamplesPerSec = m_mixer->GetSampleRate();
	wfx.Format.nBlockAlign = wfx.Format.nChannels*wfx.Format.wBitsPerSample / 8;
	wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
	wfx.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
	wfx.Samples.wValidBitsPerSample = 16;
	wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
	wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

	// Create the source voice; this object is the voice callback receiver.
	HRESULT hr = pXAudio2->CreateSourceVoice(&m_source_voice, &wfx.Format, XAUDIO2_VOICE_NOSRC, 1.0f, this);
	if (FAILED(hr))
	{
		PanicAlertT("XAudio2_7 CreateSourceVoice failed: %#X", hr);
		return;
	}

	m_source_voice->Start();

	// Prime the voice with NUM_BUFFERS buffers of silence so playback begins
	// immediately and OnBufferEnd callbacks start flowing.
	for (int i = 0; i < NUM_BUFFERS; ++i)
		SubmitBuffer(xaudio_buffer.get() + (i * BUFFER_SIZE_BYTES));
}
예제 #4
0
void StreamingVoiceContext2_7::OnBufferEnd(void* context)
{
	// XAudio2 buffer-end callback: refill the buffer that just finished with
	// SAMPLES_PER_BUFFER fresh samples from the mixer and queue it again.
	if (!m_source_voice || !context)
		return;

	// Optional tighter synchronization with the producer (left disabled):
	//m_sound_sync_event->Wait(); // sync
	//m_sound_sync_event->Spin(); // or tight sync

	short* const samples = static_cast<short*>(context);
	m_mixer->Mix(samples, SAMPLES_PER_BUFFER);
	SubmitBuffer(static_cast<BYTE*>(context));
}
예제 #5
0
	void IAudioMixerPlatformInterface::BeginGeneratingAudio()
	{
		FAudioPlatformDeviceInfo & DeviceInfo = AudioStreamInfo.DeviceInfo;

		// Setup the output buffers
		for (int32 Index = 0; Index < NumMixerBuffers; ++Index)
		{
			OutputBuffers[Index].SetNumZeroed(DeviceInfo.NumSamples);
		}

		// Submit the first empty buffer. This will begin audio callback notifications on some platforms.
		SubmitBuffer(OutputBuffers[CurrentBufferIndex++]);

		// Launch the task to begin generating audio
		CallbackTask = new FAsyncAudioMixerCallbackTask(this, &OutputBuffers[CurrentBufferIndex]);
		CallbackTask->StartBackgroundTask();
	}
예제 #6
0
/*
========================
idSoundVoice_OpenAL::OnBufferStart

Called when a buffer begins playing; queues the buffer that should follow it.
Wraps to buffer 0, switching from the lead-in sample to the looping sample
when one exists; stops queueing when the lead-in ends and nothing loops.
========================
*/
void idSoundVoice_OpenAL::OnBufferStart( idSoundSample_OpenAL* sample, int bufferNumber )
{
	//SetSampleRate( sample->SampleRate(), XAUDIO2_COMMIT_NOW );
	
	idSoundSample_OpenAL* queueSample = sample;
	int queueBuffer = bufferNumber + 1;
	if( queueBuffer == sample->buffers.Num() )
	{
		// Ran off the end of this sample's buffer list.
		if( sample == leadinSample )
		{
			if( loopingSample == NULL )
			{
				// Lead-in finished and nothing loops: stop queueing.
				return;
			}
			queueSample = loopingSample;
		}
		queueBuffer = 0;
	}
	
	SubmitBuffer( queueSample, queueBuffer, 0 );
}
예제 #7
0
	//----------------------------------------------------------------------------------------------------
	// Opens an audio file with FFmpeg, creates an XAudio2 source voice matching the
	// decoded PCM format, then launches a background thread that decodes the entire
	// stream into m_data, submitting buffers to the voice as data arrives.
	//
	// Parameters:
	//   _fileName - path of the audio file to open (passed to avformat_open_input).
	// Returns:
	//   false if opening the file, finding the stream, opening the codec, or
	//   creating the source voice fails; true once the decode thread is launched.
	//   The decode thread's own bool result is never observed by callers.
	bool EEMusic::AsyncLoadMusic(const char* _fileName)
	{
		AVFormatContext *formatContext = NULL;
		int streamIndex = -1;
		AVCodecContext *codecContext = NULL;
		AVCodec *codec = NULL;

		// open file
		if (avformat_open_input(&formatContext, _fileName, NULL, NULL) < 0)
		{
			return false;
		}

		// find stream info
		if (avformat_find_stream_info(formatContext, NULL) < 0)
		{
			//unable to find stream info
			avformat_close_input(&formatContext);
			return false;
		}
		// find the stream
		if ((streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		// find decoder
		codecContext = formatContext->streams[streamIndex]->codec;
		codec = avcodec_find_decoder(codecContext->codec_id);
		if (!codec)
		{
			avformat_close_input(&formatContext);
			return false;
		}
		// open codec
		if (avcodec_open2(codecContext, codec, NULL) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		// Derive the output PCM layout from the decoder's sample format by mapping
		// it to the equivalent PCM codec and reading that codec's bit depth.
		int channels = codecContext->channels;
		int bitsPerSample = av_get_bits_per_sample(av_get_pcm_codec(codecContext->sample_fmt, -1));
		int bytesPerSample = bitsPerSample / 8;
		int samplesPerSec = codecContext->sample_rate;
		int blockAlign = bytesPerSample * channels;
		int avgBytesPerSec = samplesPerSec * blockAlign;
		// m_totalBytes = (int)((double)formatContext->duration / AV_TIME_BASE * avgBytesPerSec);
		// m_totalSamples = (int)((double)formatContext->duration / AV_TIME_BASE * samplesPerSec);
		// m_totalTime = formatContext->duration / (double)AV_TIME_BASE;

		// 32-bit samples are assumed to be float PCM; everything else is integer PCM.
		// NOTE(review): a 32-bit integer format (e.g. AV_SAMPLE_FMT_S32) would be
		// mislabeled as IEEE float here - confirm the supported input formats.
		if (bitsPerSample == 32)
			m_format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
		else
			m_format.wFormatTag = WAVE_FORMAT_PCM;
		m_format.nChannels = channels;
		m_format.nSamplesPerSec = samplesPerSec;
		m_format.nAvgBytesPerSec = avgBytesPerSec;
		m_format.nBlockAlign = blockAlign;
		m_format.wBitsPerSample = bitsPerSample;
		m_format.cbSize = 0;
		if (FAILED(s_XAudio2->CreateSourceVoice(&m_sourceVoice, &m_format, 0, XAUDIO2_DEFAULT_FREQ_RATIO, &m_musicCallBack)))
			return false;

		// todo: keep the the handler of the thr
		// Decode thread. The default &-capture makes `this` available for member
		// access; the FFmpeg handles are captured by value so they survive this
		// function returning. NOTE(review): nothing visible here guarantees the
		// EEMusic object outlives the thread - confirm ownership/shutdown.
		m_loader = new boost::thread([&, bytesPerSample, formatContext, streamIndex, codecContext, codec] () mutable ->bool {
			AVPacket *packet = new AVPacket;
			av_init_packet(packet);
			AVFrame	*frame = av_frame_alloc();
			uint32_t len = 0;		// running total of decoded bytes
			int got;				// nonzero when a full frame was decoded
			while (av_read_frame(formatContext, packet) >= 0)
			{
				if (packet->stream_index == streamIndex)
				{
					if (avcodec_decode_audio4(codecContext, frame, &got, packet) < 0)
					{
						printf("Error in decoding audio frame.\n");
						av_free_packet(packet);
						continue;
					}
					if (got > 0)
					{
						// Bytes of one channel plane for this frame.
						// NOTE(review): for interleaved (non-planar) input, `size`
						// ignores the channel count - confirm mono/stereo handling.
						//int size = *frame->linesize;
						int size = frame->nb_samples * bytesPerSample;
						// Each m_data entry records (byte offset of chunk, chunk bytes).
						if (m_data.empty())
							m_data.push_back(std::pair<int, std::string>(0, std::string()));
						else
							m_data.push_back(std::pair<int, std::string>(m_data.back().first + m_data.back().second.size(), std::string()));
						std::string& data = m_data.back().second;
						if (av_sample_fmt_is_planar(codecContext->sample_fmt))
						{
							// Planar input: interleave the first two channel planes
							// sample-by-sample into the output chunk.
							// NOTE(review): hardcodes exactly 2 planes - confirm
							// behavior for mono or >2-channel planar streams.
							data.resize(size * 2);
							int index = 0;
							for (int i = 0; i < size; i += bytesPerSample)
							{
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[0][i + j];
								}
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[1][i + j];
								}
							}
							len += size * 2;
						}
						else
						{
							// Interleaved input: copy the frame's data verbatim.
							data.resize(size);
							memcpy((&data[0]), frame->data[0], size);
							len += size;
						}
						try
						{
							PushBuffer(EEMusicCell(&m_data.back()));
							// If the voice starved, re-prime it with three buffers.
							if (EE_MUSIC_NO_BUFFER == m_state)
							{
								SubmitBuffer();
								SubmitBuffer();
								SubmitBuffer();
							}
							// Sleep is also the interruption point for thread_interrupted.
							EEThreadSleep(1);
						}
						catch (boost::thread_interrupted&)
						{
							// NOTE(review): this path frees the codec/format contexts
							// but leaks `packet` and `frame` - confirm and fix upstream.
							avcodec_close(codecContext);
							avformat_close_input(&formatContext);
							return false;
						}
					}
				}
				av_free_packet(packet);
			}
			// Whole stream decoded: publish the exact totals.
			m_totalBytes = len;
			m_totalSamples = len / m_format.nBlockAlign;
			m_totalTime = (double)m_totalSamples / m_format.nSamplesPerSec;

			// NOTE(review): `packet` allocated with new is never deleted here either.
			av_frame_free(&frame);
			avcodec_close(codecContext);
			avformat_close_input(&formatContext);

			return true;
		});

		return true;
	}