Example #1
	virtual const char* open( void * hwnd, unsigned sample_rate, unsigned nch, unsigned max_samples_per_frame, unsigned num_frames )
	{
		this->hwnd = hwnd;
		this->sample_rate = sample_rate;
		this->nch = nch;
		this->max_samples_per_frame = max_samples_per_frame;
		this->num_frames = num_frames;

#ifdef HAVE_KS_HEADERS
		WAVEFORMATEXTENSIBLE wfx;
		wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
		wfx.Format.nChannels = nch; //1;
		wfx.Format.nSamplesPerSec = sample_rate;
		wfx.Format.nBlockAlign = 2 * nch; //2;
		wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
		wfx.Format.wBitsPerSample = 16;
		wfx.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
		wfx.Samples.wValidBitsPerSample = 16;
		wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
		wfx.dwChannelMask = nch == 2 ? KSAUDIO_SPEAKER_STEREO : KSAUDIO_SPEAKER_MONO;
#else
		WAVEFORMATEX wfx;
		wfx.wFormatTag = WAVE_FORMAT_PCM;
		wfx.nChannels = nch; //1;
		wfx.nSamplesPerSec = sample_rate;
		wfx.nBlockAlign = 2 * nch; //2;
		wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
		wfx.wBitsPerSample = 16;
		wfx.cbSize = 0;
#endif
		HRESULT hr = XAudio2Create( &xaud, 0 );
		if (FAILED(hr)) return "Creating XAudio2 interface";
		hr = xaud->CreateMasteringVoice(
			&mVoice,
			nch,
			sample_rate,
			0,
			NULL,
			NULL );
		if (FAILED(hr)) return "Creating XAudio2 mastering voice";
		hr = xaud->CreateSourceVoice( &sVoice, (const WAVEFORMATEX *) &wfx, 0, 4.0f, &notify );
		if (FAILED(hr)) return "Creating XAudio2 source voice";
		hr = sVoice->Start( 0 );
		if (FAILED(hr)) return "Starting XAudio2 voice";
		hr = sVoice->SetFrequencyRatio( 1.0f );
		if (FAILED(hr)) return "Setting XAudio2 voice frequency ratio";
		buffered_count = 0;
		buffer_read_cursor = 0;
		buffer_write_cursor = 0;
		samples_played = 0;
		sample_buffer = new int16_t[ max_samples_per_frame * num_frames ];
		samples_in_buffer = new UINT64[ num_frames ];
		memset( samples_in_buffer, 0, sizeof( UINT64 ) * num_frames );
		return NULL;
	}
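
For context, here is a minimal sketch of a write path that could pair with the open() above. It assumes the members visible in that function (sVoice, sample_buffer, samples_in_buffer, buffer_write_cursor, buffered_count, max_samples_per_frame, num_frames) and is not the project's actual implementation. The frame is copied into our own ring slot before submission because XAudio2 does not copy pAudioData; the memory must remain valid until the buffer finishes playing.

	// Hypothetical sketch: submit one frame of 16-bit samples
	const char* write_frame( const int16_t * data, unsigned num_samples )
	{
		unsigned slot = buffer_write_cursor;
		int16_t * dst = sample_buffer + slot * max_samples_per_frame;
		memcpy( dst, data, num_samples * sizeof( int16_t ) );
		samples_in_buffer[ slot ] = num_samples;

		XAUDIO2_BUFFER buf = { 0 };
		buf.AudioBytes = num_samples * sizeof( int16_t );
		buf.pAudioData = ( const BYTE * ) dst;
		buf.pContext = ( void * )( uintptr_t ) slot; // lets OnBufferEnd identify the slot

		if ( FAILED( sVoice->SubmitSourceBuffer( &buf ) ) )
			return "Submitting XAudio2 buffer";

		buffer_write_cursor = ( slot + 1 ) % num_frames;
		++buffered_count;
		return NULL;
	}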
Example #2
bool XAudio2_Output::init(long sampleRate)
{
	if( failed || initialized ) return false;

	HRESULT hr;

	// Initialize XAudio2
	UINT32 flags = 0;
//#ifdef _DEBUG
//	flags = XAUDIO2_DEBUG_ENGINE;
//#endif

	hr = XAudio2Create( &xaud, flags );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_FAILURE, NULL );
		failed = true;
		return false;
	}


	freq = sampleRate;

	// calculate the number of sample frames per video frame first,
	// then multiply by the size of a sample frame (16-bit stereo = 4 bytes)
	soundBufferLen = ( freq / 60 ) * 4;
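	// e.g. at 44100 Hz: 44100 / 60 = 735 sample frames per video frame,
	// and 735 * 4 bytes = 2940 bytes per buffer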

	// create our own buffers to store the sound data, because it must not
	// be manipulated while the voice plays from it;
	// + 1 because we need one temporary buffer when all others are in use
	buffers = (BYTE *)malloc( ( bufferCount + 1 ) * soundBufferLen );
	if( buffers == NULL ) {
		failed = true;
		return false;
	}

	WAVEFORMATEX wfx;
	ZeroMemory( &wfx, sizeof( wfx ) );
	wfx.wFormatTag = WAVE_FORMAT_PCM;
	wfx.nChannels = 2;
	wfx.nSamplesPerSec = freq;
	wfx.wBitsPerSample = 16;
	wfx.nBlockAlign = wfx.nChannels * ( wfx.wBitsPerSample / 8 );
	wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;


	// create sound receiver
	hr = xaud->CreateMasteringVoice(
		&mVoice,
		XAUDIO2_DEFAULT_CHANNELS,
		XAUDIO2_DEFAULT_SAMPLERATE,
		0,
		theApp.xa2Device,
		NULL );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_CANNOT_CREATE_MASTERINGVOICE, NULL );
		failed = true;
		return false;
	}


	// create sound emitter
	hr = xaud->CreateSourceVoice( &sVoice, &wfx, 0, 4.0f, &notify );
	if( hr != S_OK ) {
		systemMessage( IDS_XAUDIO2_CANNOT_CREATE_SOURCEVOICE, NULL );
		failed = true;
		return false;
	}


	if( theApp.xa2Upmixing ) {
		// set up stereo upmixing
		XAUDIO2_DEVICE_DETAILS dd;
		ZeroMemory( &dd, sizeof( dd ) );
		hr = xaud->GetDeviceDetails( 0, &dd );
		ASSERT( hr == S_OK );
		float *matrix = (float *)malloc( sizeof( float ) * 2 * dd.OutputFormat.Format.nChannels );
		if( matrix == NULL ) return false;
		bool matrixAvailable = true;
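		// SetOutputMatrix expects SourceChannels * DestinationChannels volume
		// levels laid out as matrix[ SourceChannels * dest + src ]; with two
		// source channels, each row below is one speaker's (L, R) gain pair,
		// and 0.7071 is -3 dB (1/sqrt(2)) for the equal-power center mix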
		switch( dd.OutputFormat.Format.nChannels ) {
			case 4: // 4.0
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Back  L*/	matrix[4] = 1.0000f;  matrix[5] = 0.0000f;
	/*Back  R*/	matrix[6] = 0.0000f;  matrix[7] = 1.0000f;
				break;
			case 5: // 5.0
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*Side  L*/	matrix[6] = 1.0000f;  matrix[7] = 0.0000f;
	/*Side  R*/	matrix[8] = 0.0000f;  matrix[9] = 1.0000f;
				break;
			case 6: // 5.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Side  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Side  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
				break;
			case 7: // 6.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Side  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Side  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
	/*Back  C*/	matrix[12] = 0.7071f;  matrix[13] = 0.7071f;
				break;
			case 8: // 7.1
	//Speaker \ Left Source           Right Source
	/*Front L*/	matrix[0] = 1.0000f;  matrix[1] = 0.0000f;
	/*Front R*/	matrix[2] = 0.0000f;  matrix[3] = 1.0000f;
	/*Front C*/	matrix[4] = 0.7071f;  matrix[5] = 0.7071f;
	/*LFE    */	matrix[6] = 0.0000f;  matrix[7] = 0.0000f;
	/*Back  L*/	matrix[8] = 1.0000f;  matrix[9] = 0.0000f;
	/*Back  R*/	matrix[10] = 0.0000f;  matrix[11] = 1.0000f;
	/*Side  L*/	matrix[12] = 1.0000f;  matrix[13] = 0.0000f;
	/*Side  R*/	matrix[14] = 0.0000f;  matrix[15] = 1.0000f;
				break;
			default:
				matrixAvailable = false;
				break;
		}
		if( matrixAvailable ) {
			hr = sVoice->SetOutputMatrix( NULL, 2, dd.OutputFormat.Format.nChannels, matrix );
			ASSERT( hr == S_OK );
		}
		free( matrix );
		matrix = NULL;
	}


	hr = sVoice->Start( 0 );
	ASSERT( hr == S_OK );
	playing = true;

	currentBuffer = 0;
	device_changed = false;

	initialized = true;
	return true;
}
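
Both examples above pass a notify object to CreateSourceVoice without showing its type. A minimal sketch of such an IXAudio2VoiceCallback implementation follows; the class and member names here are assumptions, not taken from either project:

class BufferNotify : public IXAudio2VoiceCallback
{
public:
	HANDLE hBufferEndEvent;
	BufferNotify()  { hBufferEndEvent = CreateEvent( NULL, FALSE, FALSE, NULL ); }
	~BufferNotify() { CloseHandle( hBufferEndEvent ); }
	// signal the producer that a buffer slot has finished playing
	void STDMETHODCALLTYPE OnBufferEnd( void * ) { SetEvent( hBufferEndEvent ); }
	// the remaining callbacks are not needed for this scheme
	void STDMETHODCALLTYPE OnBufferStart( void * ) {}
	void STDMETHODCALLTYPE OnLoopEnd( void * ) {}
	void STDMETHODCALLTYPE OnStreamEnd() {}
	void STDMETHODCALLTYPE OnVoiceError( void *, HRESULT ) {}
	void STDMETHODCALLTYPE OnVoiceProcessingPassEnd() {}
	void STDMETHODCALLTYPE OnVoiceProcessingPassStart( UINT32 ) {}
};

A producer thread can then block on hBufferEndEvent whenever all buffer slots are in flight, which is the usual way to throttle submission.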
Example #3
DWORD CWASAPICapture::DoCaptureThread()
{
    HANDLE mmcssHandle = NULL;

    IXAudio2* xaudio = 0;
    IXAudio2MasteringVoice* mastering_voice = 0;

    IXAudio2SourceVoice* source_voice = 0;

    try {

        bool stillPlaying = true;
        DWORD mmcssTaskIndex = 0;

        HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
        if (FAILED(hr))
        {
            printf_s("Unable to initialize COM in render thread: %x\n", hr);
            return hr;
        }

    
        mmcssHandle = AvSetMmThreadCharacteristics(L"Audio", &mmcssTaskIndex);
        if (mmcssHandle == NULL)
        {
            printf_s("Unable to enable MMCSS on capture thread: %d\n", GetLastError());
        }

        //
        //  Initialize XAudio2
        //
        {
            UINT32 flags = 0;
#ifdef _DEBUG
            flags |= XAUDIO2_DEBUG_ENGINE;
#endif
            if( FAILED( hr = XAudio2Create( &xaudio, flags ) ) )
                throw "XAudio2Create";

            //  Create a mastering voice
            if( FAILED( hr = xaudio->CreateMasteringVoice( &mastering_voice ) ) )
                throw "CreateMasteringVoice";

            //  Create the source voice using the capture mix format (WAVEFORMATEX)
            if( FAILED( hr = xaudio->CreateSourceVoice( &source_voice, MixFormat() ) ) )
                throw "CreateSourceVoice";

            //  Start playback
            source_voice->Start();

        }

        while (stillPlaying)
        {
            HRESULT hr;
            //
            //  In Timer Driven mode, we want to wait for half the desired latency in milliseconds.
            //
            //  That way we'll wake up half way through the processing period to pull the 
            //  next set of samples from the engine.
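            //
            //  For example, with _EngineLatencyInMS = 20 this loop wakes
            //  roughly every 10 ms.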
            //
            DWORD waitResult = WaitForSingleObject(_ShutdownEvent, _EngineLatencyInMS / 2);
            switch (waitResult)
            {
            case WAIT_OBJECT_0 + 0:     // _ShutdownEvent
                stillPlaying = false;       // We're done, exit the loop.
                break;        
            case WAIT_TIMEOUT:          // Timeout
                //
                //  We need to retrieve the next buffer of samples from the audio capturer.
                //
                BYTE *pData;
                UINT32 framesAvailable;
                DWORD  flags;

                //
                //  Find out how much capture data is available.  We need to make sure we don't run over the length
                //  of our capture buffer.  We'll discard any samples that don't fit in the buffer.
                //
                hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
                if (SUCCEEDED(hr))
                {
                    UINT32 framesToCopy = min(framesAvailable, static_cast<UINT32>((_CaptureBufferSize - _CurrentCaptureIndex) / _FrameSize));
                    if (framesToCopy != 0)
                    {
                        //
                        //  The flags on capture tell us information about the data.
                        //
                        //  We only really care about the silent flag since we want to put frames of silence into the buffer
                        //  when we receive silence.  We rely on the fact that a logical bit 0 is silence for both float and int formats.
                        //
                        if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
                        {
                            //
                            //  Fill 0s from the capture buffer to the output buffer.
                            //
                            ZeroMemory(&_CaptureBuffer[_CurrentCaptureIndex], framesToCopy*_FrameSize);
                        }
                        else
                        {
                            //
                            //  Copy data from the audio engine buffer to the output buffer.
                            //
                            CopyMemory(&_CaptureBuffer[_CurrentCaptureIndex], pData, framesToCopy*_FrameSize);

                            //  Send the data to the source voice. XAudio2 does
                            //  not copy pAudioData, so point at our persistent
                            //  capture buffer; pData becomes invalid as soon as
                            //  ReleaseBuffer() is called below.
                            XAUDIO2_BUFFER buffer = { 0 };
                            buffer.AudioBytes = framesToCopy * _FrameSize;              // size of the buffer in bytes
                            buffer.pAudioData = &_CaptureBuffer[_CurrentCaptureIndex];  // start address of the buffer
                            source_voice->SubmitSourceBuffer( &buffer );
                        }
                        //
                        //  Bump the capture buffer pointer.
                        //
                        _CurrentCaptureIndex += framesToCopy*_FrameSize;
                    }
                    hr = _CaptureClient->ReleaseBuffer(framesAvailable);
                    if (FAILED(hr))
                    {
                        printf_s("Unable to release capture buffer: %x!\n", hr);
                    }
                }
                break;
            }
        }
    }
    catch( const char* e )
    {
        std::cout << e << std::endl;
    }

    //  Clean up XAudio2: destroy the source voice first, then the mastering
    //  voice, then release the engine. (The original code crashed in this
    //  teardown and left both calls commented out; the likely cause was the
    //  dangling pAudioData pointer fixed above.)
    if( source_voice != 0 ) {
        source_voice->DestroyVoice();
        source_voice = 0;
    }

    if( mastering_voice != 0 ) {
        mastering_voice->DestroyVoice();
        mastering_voice = 0;
    }

    if( xaudio != 0 ) {
        xaudio->Release();
        xaudio = 0;
    }

    if (mmcssHandle != NULL)
        AvRevertMmThreadCharacteristics(mmcssHandle);
    
    CoUninitialize();
    return 0;
}