Example #1
0
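	// Open an XAudio2 output: store the stream parameters, build a 16-bit PCM
	// format description, create the engine plus mastering/source voices, and
	// allocate the per-frame sample buffers. Returns NULL on success, otherwise
	// a short description of the step that failed.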
	virtual const char* open( void * hwnd, unsigned sample_rate, unsigned nch, unsigned max_samples_per_frame, unsigned num_frames )
	{
		this->hwnd = hwnd;
		this->sample_rate = sample_rate;
		this->nch = nch;
		this->max_samples_per_frame = max_samples_per_frame;
		this->num_frames = num_frames;

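		// Describe the stream as 16-bit interleaved PCM. When the KS headers are
		// available, use WAVEFORMATEXTENSIBLE so an explicit speaker mask can be set.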
#ifdef HAVE_KS_HEADERS
		WAVEFORMATEXTENSIBLE wfx;
		wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
		wfx.Format.nChannels = nch; //1;
		wfx.Format.nSamplesPerSec = sample_rate;
		wfx.Format.nBlockAlign = 2 * nch; //2;
		wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
		wfx.Format.wBitsPerSample = 16;
		wfx.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
		wfx.Samples.wValidBitsPerSample = 16;
		wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
		wfx.dwChannelMask = nch == 2 ? KSAUDIO_SPEAKER_STEREO : KSAUDIO_SPEAKER_MONO;
#else
		WAVEFORMATEX wfx;
		wfx.wFormatTag = WAVE_FORMAT_PCM;
		wfx.nChannels = nch; //1;
		wfx.nSamplesPerSec = sample_rate;
		wfx.nBlockAlign = 2 * nch; //2;
		wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
		wfx.wBitsPerSample = 16;
		wfx.cbSize = 0;
#endif
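		// Bring up the engine and the voice graph: the mastering voice first, then
		// the source voice that this output submits sample buffers to.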
		HRESULT hr = XAudio2Create( &xaud, 0 );
		if (FAILED(hr)) return "Creating XAudio2 interface";
		hr = xaud->CreateMasteringVoice(
			&mVoice,
			nch,
			sample_rate,
			0,
			NULL,
			NULL );
		if (FAILED(hr)) return "Creating XAudio2 mastering voice";
		hr = xaud->CreateSourceVoice( &sVoice, (const WAVEFORMATEX *) &wfx, 0, 4.0f, &notify ); // WAVEFORMATEXTENSIBLE begins with a WAVEFORMATEX header
		if (FAILED(hr)) return "Creating XAudio2 source voice";
		hr = sVoice->Start( 0 );
		if (FAILED(hr)) return "Starting XAudio2 voice";
		hr = sVoice->SetFrequencyRatio( 1.0f );
		if (FAILED(hr)) return "Setting XAudio2 voice frequency ratio";
		buffered_count = 0;
		buffer_read_cursor = 0;
		buffer_write_cursor = 0;
		samples_played = 0;
		sample_buffer = new int16_t[ max_samples_per_frame * num_frames ];
		samples_in_buffer = new UINT64[ num_frames ];
		memset( samples_in_buffer, 0, sizeof( UINT64 ) * num_frames );
		return NULL;
	}
Example #2
0
void OutputDeviceNodeXAudio::initSourceVoice()
{
	CI_ASSERT( ! mSourceVoice );

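	// Build an interleaved float format for the current sample rate and channel
	// count, then create the source voice on the context's shared XAudio2 engine.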
	auto context = dynamic_pointer_cast<ContextXAudio>( getContext() );

	auto wfx = msw::interleavedFloatWaveFormat( getSampleRate(), getNumChannels() );

	IXAudio2 *xaudio = context->getXAudio();
	UINT32 flags = ( mFilterEnabled ? XAUDIO2_VOICE_USEFILTER : 0 );
	HRESULT hr = xaudio->CreateSourceVoice( &mSourceVoice, wfx.get(), flags, XAUDIO2_DEFAULT_FREQ_RATIO, mVoiceCallback.get() );
	CI_ASSERT( hr == S_OK );
}
Example #3
0
void XAudio2_Output::close()
{
	initialized = false;

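	// Tear down in dependency order: stop and destroy the source voice, free the
	// staging buffers, destroy the mastering voice, then release the engine.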
	if( sVoice ) {
		if( playing ) {
			HRESULT hr = sVoice->Stop( 0 );
			ASSERT( hr == S_OK );
		}
		sVoice->DestroyVoice();
		sVoice = NULL;
	}

	if( buffers ) {
		free( buffers );
		buffers = NULL;
	}

	if( mVoice ) {
		mVoice->DestroyVoice();
		mVoice = NULL;
	}

	if( xaud ) {
		xaud->Release();
		xaud = NULL;
	}
}
Example #4
0
void SFXXAudioProvider::init()
{
   // Create a temp XAudio object for device enumeration.
   IXAudio2 *xAudio = NULL;
   if ( !_createXAudio( &xAudio ) )
   {
      Con::errorf( "SFXXAudioProvider::init() - XAudio2 failed to load!" );
      return;
   }

   // Add the devices to the info list.
   UINT32 count = 0;
   xAudio->GetDeviceCount( &count );
   for ( UINT32 i = 0; i < count; i++ )
   {
      XAUDIO2_DEVICE_DETAILS details;
      HRESULT hr = xAudio->GetDeviceDetails( i, &details );
      if ( FAILED( hr ) )
         continue;

      // Add a device to the info list.
      XADeviceInfo* info = new XADeviceInfo;
      info->deviceIndex = i;
      info->driver = String( "XAudio" );
      info->name = String( details.DisplayName );
      info->hasHardware = false;
      info->maxBuffers = 64;
      info->role = details.Role;
      info->format = details.OutputFormat;
      mDeviceInfo.push_back( info );
   }

   // We're done with XAudio for now.
   SAFE_RELEASE( xAudio );

   // If we have no devices... we're done.
   if ( mDeviceInfo.empty() )
   {
      Con::errorf( "SFXXAudioProvider::init() - No valid XAudio2 devices found!" );
      return;
   }

   // If we got this far then we should be able to
   // safely create a device for XAudio.
   regProvider( this );
}
Example #5
0
bool GetXA2Devices(wxArrayString &names, wxArrayString &ids)
{
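	// Create a temporary XAudio2 instance just to enumerate the output devices,
	// then release it once the names and ids have been collected.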
	HRESULT hr;
	IXAudio2 *xa = NULL;
	UINT32 flags = 0;
#ifdef _DEBUG
	flags = XAUDIO2_DEBUG_ENGINE;
#endif

	hr = XAudio2Create( &xa, flags );
	if( hr != S_OK ) {
		wxLogError( _("The XAudio2 interface failed to initialize!") );
		return false;
	}
	GetXA2Devices(xa, &names, &ids, NULL);
	xa->Release();
	return true;
}
Example #6
0
		void release()
		{
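			// Release the XAPO effect, destroy the mastering voice, then release the engine.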
			if (pXAPO)
			{
				pXAPO->Release();
				pXAPO = nullptr;
			}

			if (masteringVoice)
			{
				masteringVoice->DestroyVoice();
				masteringVoice = nullptr;
			}

			if (xAudio2)
			{
				xAudio2->Release();
				xAudio2 = nullptr;
			}
		}
Example #7
0
	void close()
	{
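		// Stop and destroy the voices, release the engine, and free the sample buffers.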
		if( sVoice ) {
			if( !paused ) {
				sVoice->Stop( 0 );
			}
			sVoice->DestroyVoice();
			sVoice = NULL;
		}

		if( mVoice ) {
			mVoice->DestroyVoice();
			mVoice = NULL;
		}

		if( xaud ) {
			xaud->Release();
			xaud = NULL;
		}

		delete [] sample_buffer;
		sample_buffer = NULL;
		delete [] samples_in_buffer;
		samples_in_buffer = NULL;
	}
Example #8
0
bool XAudio2_Output::init(long sampleRate)
{
	if( failed || initialized ) return false;

	HRESULT hr;

	// Initialize XAudio2
	UINT32 flags = 0;
//#ifdef _DEBUG
//	flags = XAUDIO2_DEBUG_ENGINE;
//#endif

	hr = XAudio2Create( &xaud, flags );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_FAILURE, NULL );
		failed = true;
		return false;
	}


	freq = sampleRate;

	// calculate the number of samples per frame first
	// then multiply it with the size of a sample frame (16 bit * stereo)
	soundBufferLen = ( freq / 60 ) * 4;

	// create own buffers to store sound data because it must not be
	// manipulated while the voice plays from it
	buffers = (BYTE *)malloc( ( bufferCount + 1 ) * soundBufferLen );
	// + 1 because we need one temporary buffer when all others are in use

	WAVEFORMATEX wfx;
	ZeroMemory( &wfx, sizeof( wfx ) );
	wfx.wFormatTag = WAVE_FORMAT_PCM;
	wfx.nChannels = 2;
	wfx.nSamplesPerSec = freq;
	wfx.wBitsPerSample = 16;
	wfx.nBlockAlign = wfx.nChannels * ( wfx.wBitsPerSample / 8 );
	wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;


	// create sound receiver
	hr = xaud->CreateMasteringVoice(
		&mVoice,
		XAUDIO2_DEFAULT_CHANNELS,
		XAUDIO2_DEFAULT_SAMPLERATE,
		0,
		theApp.xa2Device,
		NULL );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_CANNOT_CREATE_MASTERINGVOICE, NULL );
		failed = true;
		return false;
	}


	// create sound emitter
	hr = xaud->CreateSourceVoice( &sVoice, &wfx, 0, 4.0f, &notify );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_CANNOT_CREATE_SOURCEVOICE, NULL );
		failed = true;
		return false;
	}


	if( theApp.xa2Upmixing ) {
		// set up stereo upmixing
		XAUDIO2_DEVICE_DETAILS dd;
		ZeroMemory( &dd, sizeof( dd ) );
		hr = xaud->GetDeviceDetails( 0, &dd );
		ASSERT( hr == S_OK );
		float *matrix = NULL;
		matrix = (float*)malloc( sizeof( float ) * 2 * dd.OutputFormat.Format.nChannels );
		if( matrix == NULL ) return false;
		bool matrixAvailable = true;
		switch( dd.OutputFormat.Format.nChannels ) {
			case 4: // 4.0
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Back  L*/	matrix[4] = 1.0000f;  matrix[5] = 0.0000f;
	/*Back  R*/	matrix[6] = 0.0000f;  matrix[7] = 1.0000f;
				break;
			case 5: // 5.0
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*Side  L*/	matrix[6] = 1.0000f;  matrix[7] = 0.0000f;
	/*Side  R*/	matrix[8] = 0.0000f;  matrix[9] = 1.0000f;
				break;
			case 6: // 5.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Side  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Side  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
				break;
			case 7: // 6.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Side  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Side  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
	/*Back  C*/	matrix[12] = 0.7071f;  matrix[13] = 0.7071f;
				break;
			case 8: // 7.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Back  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Back  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
	/*Side  L*/	matrix[12] = 1.0000f;  matrix[13] = 0.0000f;
	/*Side  R*/	matrix[14] = 0.0000f;  matrix[15] = 1.0000f;
				break;
			default:
				matrixAvailable = false;
				break;
		}
		if( matrixAvailable ) {
			hr = sVoice->SetOutputMatrix( NULL, 2, dd.OutputFormat.Format.nChannels, matrix );
			ASSERT( hr == S_OK );
		}
		free( matrix );
		matrix = NULL;
	}


	hr = sVoice->Start( 0 );
	ASSERT( hr == S_OK );
	playing = true;

	currentBuffer = 0;
	device_changed = false;

	initialized = true;
	return true;
}
Example #9
0
BOOL XAudio2_Config::OnInitDialog()
{
	CDialog::OnInitDialog();

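	// Populate the buffer-count slider and its latency readout, then enumerate the
	// available XAudio2 output devices into the device combo box.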
	m_combo_dev.ResetContent();

	m_slider_buffer.SetRange( 2, 10, FALSE );
	m_slider_buffer.SetTicFreq( 1 );
	m_slider_buffer.SetPos( (int)m_buffer_count );

	CString info;
	int pos = m_slider_buffer.GetPos();
	info.Format( _T("%i frames = %.2f ms"), pos, (float)pos / 60.0f * 1000.0f );
	m_info_buffer.SetWindowText( info );

	HRESULT hr;
	IXAudio2 *xa = NULL;
	UINT32 flags = 0;
#ifdef _DEBUG
	flags = XAUDIO2_DEBUG_ENGINE;
#endif

	hr = XAudio2Create( &xa, flags );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_FAILURE, NULL );
	} else {
		UINT32 dev_count = 0;
		hr = xa->GetDeviceCount( &dev_count );
		if( hr != S_OK ) {
			systemMessage( IDS_XAUDIO2_CANNOT_ENUMERATE_DEVICES, NULL );
		} else {
			XAUDIO2_DEVICE_DETAILS dd;
			for( UINT32 i = 0; i < dev_count; i++ ) {
				hr = xa->GetDeviceDetails( i, &dd );
				if( hr != S_OK ) {
					continue;
				} else {
#ifdef _UNICODE
					int id = m_combo_dev.AddString( dd.DisplayName );
#else
					CHAR temp[256];
					ZeroMemory( temp, sizeof( temp ) );
					WideCharToMultiByte(
						CP_ACP,
						WC_NO_BEST_FIT_CHARS,
						dd.DisplayName,
						-1,
						temp,
						sizeof( temp ) - 1,
						NULL,
						NULL );
					
					int id = m_combo_dev.AddString( temp );
#endif
					if( id < 0 ) {
						systemMessage( IDS_XAUDIO2_CANNOT_ENUMERATE_DEVICES, NULL );
						break;
					} else {
						m_combo_dev.SetItemData( id, i );
					}
				}
			}

			// select the currently configured device
			int count = m_combo_dev.GetCount();
			if( count > 0 ) {
				for( int i = 0; i < count; i++ ) {
					if( m_combo_dev.GetItemData( i ) == m_selected_device_index ) {
						m_combo_dev.SetCurSel( i );
						break;
					}
				}
			}

		}
		xa->Release();
		xa = NULL;
	}

	return TRUE;  // return TRUE unless you set the focus to a control
	// EXCEPTION: OCX Property Pages should return FALSE
}
Example #10
0
DWORD CWASAPICapture::DoCaptureThread()
{
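    // Capture thread: pulls captured audio from the WASAPI capture client and feeds
    // it to an XAudio2 source voice for playback until the shutdown event fires.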
    HANDLE mmcssHandle = NULL;

    IXAudio2* xaudio = 0;
    IXAudio2MasteringVoice* mastering_voice = 0;

    IXAudio2SourceVoice* source_voice = 0;

    try {

        bool stillPlaying = true;
        DWORD mmcssTaskIndex = 0;

        HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
        if (FAILED(hr))
        {
            printf_s("Unable to initialize COM in render thread: %x\n", hr);
            return hr;
        }

    
        mmcssHandle = AvSetMmThreadCharacteristics(L"Audio", &mmcssTaskIndex);
        if (mmcssHandle == NULL)
        {
            printf_s("Unable to enable MMCSS on capture thread: %d\n", GetLastError());
        }

        //
        //  Initialize XAudio2
        //
        {
            UINT32 flags = 0;
#ifdef _DEBUG
            flags |= XAUDIO2_DEBUG_ENGINE;
#endif
            if( FAILED( hr = XAudio2Create( &xaudio, flags ) ) )
                throw "XAudio2Create";

            //  Create a mastering voice
            if( FAILED( hr = xaudio->CreateMasteringVoice( &mastering_voice ) ) )
                throw "CreateMasteringVoice";

            //  Create the source voice using the capture mix format
            if( FAILED( hr = xaudio->CreateSourceVoice( &source_voice, MixFormat() ) ) )
                throw "CreateSourceVoice";

            //  Start playback
            source_voice->Start();

        }

        while (stillPlaying)
        {
            HRESULT hr;
            //
            //  In Timer Driven mode, we want to wait for half the desired latency in milliseconds.
            //
            //  That way we'll wake up half way through the processing period to pull the 
            //  next set of samples from the engine.
            //
            DWORD waitResult = WaitForSingleObject(_ShutdownEvent, _EngineLatencyInMS / 2);
            switch (waitResult)
            {
            case WAIT_OBJECT_0 + 0:     // _ShutdownEvent
                stillPlaying = false;       // We're done, exit the loop.
                break;        
            case WAIT_TIMEOUT:          // Timeout
                //
                //  We need to retrieve the next buffer of samples from the audio capturer.
                //
                BYTE *pData;
                UINT32 framesAvailable;
                DWORD  flags;

                //
                //  Find out how much capture data is available.  We need to make sure we don't run over the length
                //  of our capture buffer.  We'll discard any samples that don't fit in the buffer.
                //
                hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
                if (SUCCEEDED(hr))
                {
                    UINT32 framesToCopy = min(framesAvailable, static_cast<UINT32>((_CaptureBufferSize - _CurrentCaptureIndex) / _FrameSize));
                    if (framesToCopy != 0)
                    {
                        //
                        //  The flags on capture tell us information about the data.
                        //
                        //  We only really care about the silent flag since we want to put frames of silence into the buffer
                        //  when we receive silence.  We rely on the fact that a logical bit 0 is silence for both float and int formats.
                        //
                        if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
                        {
                            //
                            //  Fill 0s from the capture buffer to the output buffer.
                            //
                            ZeroMemory(&_CaptureBuffer[_CurrentCaptureIndex], framesToCopy*_FrameSize);
                        }
                        else
                        {
                            //
                            //  Copy data from the audio engine buffer to the output buffer.
                            //
                            CopyMemory(&_CaptureBuffer[_CurrentCaptureIndex], pData, framesToCopy*_FrameSize);

                            // Submit the captured data to the source voice. Point at the
                            // persistent copy in _CaptureBuffer: XAudio2 reads the buffer
                            // asynchronously, and pData becomes invalid after ReleaseBuffer().
                            XAUDIO2_BUFFER buffer = { 0 };
                            buffer.AudioBytes = framesToCopy * _FrameSize;              // buffer size in bytes
                            buffer.pAudioData = &_CaptureBuffer[_CurrentCaptureIndex]; // start of the copied data
                            source_voice->SubmitSourceBuffer( &buffer );
                        }
                        //
                        //  Bump the capture buffer pointer.
                        //
                        _CurrentCaptureIndex += framesToCopy*_FrameSize;
                    }
                    hr = _CaptureClient->ReleaseBuffer(framesAvailable);
                    if (FAILED(hr))
                    {
                        printf_s("Unable to release capture buffer: %x!\n", hr);
                    }
                }
                break;
            }
        }
    }
    catch( const char* e )
    {
        std::cout << e << std::endl;
    }

    //  Cleanup XAudio2
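    //  NOTE: source_voice is never stopped or destroyed before this cleanup; leaving it
    //  alive is a likely reason the commented-out teardown calls below crash.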
    if( mastering_voice != 0 ) {
        // crashes here
        //mastering_voice->DestroyVoice();
        mastering_voice = 0;
    }

    if( xaudio != 0 ) {
        // also crashes here
        //xaudio->Release();
        xaudio = 0;
    }

    AvRevertMmThreadCharacteristics(mmcssHandle);
    
    CoUninitialize();
    return 0;
}