コード例 #1
0
int main (int argc, char **argv) {

    #pragma region // --- my init ---
    float pitchnow = 1.0;
    alListener3f(AL_POSITION, 0.0, 0.0, 0.0);

    #pragma endregion

    #pragma region // --- al init ---
// alut の初期化

    alutInit (&argc, argv);

// Hello World としゃべる音声の作成
    ALuint helloBuffer = alutCreateBufferHelloWorld();
    ALuint MusicBuffer = alutCreateBufferFromFile(FILENAME);
    if(AL_NONE == helloBuffer) {
        std::cerr<<"error:nofile"<<std::endl;
        exit(1);
    }

// ソースの作成
    ALuint helloSource;
    alGenSources (1, &helloSource);
    ALuint MusicSource;
    alGenSources (1, &MusicSource);


// ソースにバッファをバインド
    alSourcei (helloSource, AL_BUFFER, helloBuffer);
    alSourcei (MusicSource, AL_BUFFER, MusicBuffer);
    #pragma endregion

    #pragma region // --- effect init ---

    ALCdevice *pDevice = alcOpenDevice(NULL);
    if(!pDevice) {
        std::cerr<<"error : device not found"<<std::endl;
        exit(1);
    }

    LPALGENEFFECTS alGenEffects=(LPALGENEFFECTS)alGetProcAddress("alGenEffects");


    #pragma region // --- set Reverb ---
    ALuint Effect = 0;
    alGenEffects(1, &Effect);
    alEffecti(Effect, AL_EFFECT_TYPE, AL_EFFECT_REVERB);

    #pragma endregion


    #pragma endregion

    cv::namedWindow("hoge");
    alSourcePlay(MusicSource);

    alSource3f(MusicSource, AL_POSITION, 100.0, 0.0, 0.0);
    alSource3f(MusicSource, AL_VELOCITY, 10.0, 0.0, 0.0);

    while(1) {

        char key = cv::waitKey(1);
        if(key=='s') {
            alSourcePlay(helloSource);
            //alutSleep(1);
        }
        if(key == 'p') {
            int state;
            alGetSourcei(MusicSource, AL_SOURCE_STATE, &state);
            if(state ==AL_PAUSED)alSourcePlay(MusicSource);
            else alSourcePause(MusicSource);
        }
        else if(key == 'q') {
            std::cout<<"good bye"<<std::endl;
            break;
        }
        else if(key == 'u') {
            pitchnow *= 2;
            alSourcef(MusicSource, AL_PITCH, pitchnow);
        }
        else if(key == 'd') {
            pitchnow /= 2;
            alSourcef(MusicSource, AL_PITCH, pitchnow);
        }

        // roop
        int state;
        alGetSourcei(MusicSource, AL_SOURCE_STATE, &state);
        if(state != AL_PLAYING) alSourcePlay(MusicSource);

    }


    #pragma region --- release ---
    // リソースを開放
    alSourceStop(helloSource);
    alDeleteSources( 1, &helloSource );
    alDeleteBuffers( 1, &helloBuffer );
    alSourceStop(MusicSource);
    alDeleteSources( 1, &MusicSource );
    alDeleteBuffers( 1, &MusicBuffer );
    alutExit ();
    #pragma endregion

    return 0;
}
コード例 #2
0
void SoundManager::stopMetronome()
{
	// Halt the metronome's OpenAL source.
	// NOTE(review): slot 0 appears dedicated to the metronome tick -- confirm.
	const auto metronomeSource = source_[0];
	alSourceStop(metronomeSource);
}
コード例 #3
0
ファイル: title.cpp プロジェクト: lookin4/MonsterBreeder
// Opening cut-scene screen: draws the full-screen intro image, shows a fixed
// title text or the current message callback (`o_message`), and advances the
// message sequence on Enter until control passes to `farm`.
// Relies on globals: opening_init, init_music, init_image, o_message, the
// message functions (hajimemasite, ...), fonts, and the screen-switch `func`.
void opening() {
	// One-time init: start the intro music the first time we get here.
	if (opening_init == 0) {
		opening_init = 1;
		alSourcePlay(init_music->source);
	}
	glMatrixMode(GL_PROJECTION);          /* select the projection matrix */
	glLoadIdentity();                     /* reset it to identity */
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	// NOTE(review): this glOrtho is applied while GL_MODELVIEW is current,
	// not GL_PROJECTION -- it works because both matrices were just reset,
	// but confirm this ordering is intentional.
	glOrtho(
		0,//GLdouble left,
		300,//GLdouble right,
		0,//GLdouble bottom,
		300,//GLdouble top,
		1,//GLdouble zNear, 
		-1//GLdouble zFar
		);


	// Bind the intro background texture and draw it as a full-screen quad.
	init_image->changeImage();

	glColor4f(1, 1, 1, 1);
	glBegin(GL_QUADS);
	{
		glTexCoord2f(0.f, 0.f);
		glVertex2f(0, 300);
		glTexCoord2f(0.f, 1.f);
		glVertex2f(0, 0);
		glTexCoord2f(1.f, 1.f);
		glVertex2f(300, 0);
		glTexCoord2f(1.f, 0.f);
		glVertex2f(300, 300);
	}
	glEnd();

	// Either run the current message callback, or show the default title text.
	if (o_message != NULL) {
		o_message();
	}
	else {
		glColor3f(0.7f, 0.5f, 0);
		max_font->ChangeSize(lkn::TYPE_MAX);
		max_font->DrawStringW(10, 160, L"はじめの出会い");
		font->ChangeSize(lkn::TYPE_NORMAL);
		glColor3f(1, 0, 0);
		font->DrawStringW(100, 110, L"〜ファームを救え!〜");
	}

	glDisable(GL_TEXTURE_2D);

    font->ChangeSize(lkn::TYPE_NORMAL);

	// Enter (0x0d) steps through the scripted message chain; after the last
	// message the music stops and control transfers to the farm screen.
	if (lkn::InputManager::getInstance()->keyPless(0x0d)) {

		if (o_message == NULL) {
			o_message = hajimemasite;
		}
		else if(o_message == hajimemasite) {
			o_message = kyuudakedo;
		}
		else if (o_message == kyuudakedo) {
			o_message = sokode;
		}
		else if (o_message == sokode) {
			o_message = tasukete;
		}
		else if (o_message == tasukete) {
			o_message = yorosikune;
		}
		else if (o_message == yorosikune) {
			o_message = NULL;
			alSourceStop(init_music->source);
			func = farm;
		}
	}

	// Footer hint line.
	glColor3f(0, 0, 0);
	min_font->ChangeSize(lkn::TYPE_MIN);
	min_font->DrawStringW(10, 285, L"      Enter:決定");

}
コード例 #4
0
ファイル: Sound.cpp プロジェクト: BlueCobold/SFML
void Sound::stop()
{
    // Halt playback of this sound's source; alCheck reports any AL error.
    alCheck(alSourceStop(m_source));
}
コード例 #5
0
ファイル: sound.cpp プロジェクト: Garfeild/qtpim
void Sound::Stop()
{
  // Stop playback immediately; the source rewinds to the start of its buffer.
  alSourceStop(Source);
}
コード例 #6
0
ファイル: server.c プロジェクト: bhoomil-avch/IPcam
/* Audio receive thread: reassembles one-second PCM frames from UDP chunks,
 * band-passes them with FFTW (forward transform, zero low/high bins,
 * inverse transform), and plays the result through a cycling pool of
 * OpenAL buffers. Runs until the system enters SYS_STATUS_RELEASE. */
static void *Audio_Rx_loop()
{
	int Rx_socket;
	struct sockaddr_in Rx_addr;

	/* Socket init */
	if(Rx_socket_init(&Rx_socket, &Rx_addr, AUDIO_NET_PORT) ==0) {
		fprintf(stderr, "Rx Socket init \n");
		return NULL;
	}
	printf("Audio Rx socket init finish\n");

	/* OpenAL playback state: one source cycling through a buffer pool */
	ALCdevice *PlayDevice ;
	ALCcontext* PlayContext ;
	ALuint PlaySource; 
	ALuint PlayBuffer[AUDIO_Rx_PLAY_BUFFER_COUNT]; 

	PlayDevice = alcOpenDevice(NULL);
	PlayContext = alcCreateContext(PlayDevice, NULL); 

	alcMakeContextCurrent(PlayContext); 

	alGenSources(1, &PlaySource);
	alGenBuffers(AUDIO_Rx_PLAY_BUFFER_COUNT, PlayBuffer); 


	/*----------------------------*/
	/* fftw init: real-to-complex forward plan, complex-to-real inverse plan.
	 * FIX: dropped the unused FT_out_tmp declaration. */
	double *FT_in;
	double *IFT_in;
	fftw_complex *FT_out;
	fftw_plan FT_plan;
	fftw_plan IFT_plan;
	/* number of complex bins produced by an N-sample real transform */
	const double FOURIER_NUMBER = (floor( (double)AUDIO_SAMPLE_RATE*0.5 )+1); 

	FT_in =  (double*) fftw_malloc(sizeof(double) * (int)AUDIO_SAMPLE_RATE);
	IFT_in = (double*) fftw_malloc(sizeof(double) *(int) AUDIO_SAMPLE_RATE);
	FT_out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (int)FOURIER_NUMBER);
	FT_plan =  fftw_plan_dft_r2c_1d(AUDIO_SAMPLE_RATE, FT_in, FT_out, (int)FFTW_ESTIMATE);
	IFT_plan = fftw_plan_dft_c2r_1d(AUDIO_SAMPLE_RATE, FT_out,IFT_in, (int)FFTW_ESTIMATE);


	int Buf_counter =0;
	unsigned short play_Audio_buf[AUDIO_SAMPLE_RATE];
	int recv_len;
	int ID;
	int remain_size = AUDIO_SAMPLE_RATE * sizeof(short);
	unsigned char Rx_Buffer[AUDIO_BUFFER_SIZE];
	int i;

	fd_set fds;
	struct timeval tv;
	int r;

	/* FIX: this is the Rx loop -- the original logged "Tx" here. */
	printf("Audio Rx Looping\n");
	while(sys_get_status() != SYS_STATUS_RELEASE) {
		/* receive audio while the system is in the WORK state */
		while(sys_get_status() == SYS_STATUS_WORK) {

			/* Timeout so a stalled sender cannot block state changes. */
			FD_ZERO(&fds);
			FD_SET(Rx_socket, &fds);
			tv.tv_sec = 2;
			tv.tv_usec = 0;

			r = select(Rx_socket + 1, &fds, NULL, NULL, &tv);
			if (r == -1) {
				fprintf(stderr, "select");
				goto AUDIO_RX_RELEASE;
			}
			if (r == 0) {
				/* select timeout: just re-check the system state */
				continue;
			}


			/*Receive Data */
			recv_len = recv(Rx_socket, (char*)&Rx_Buffer, sizeof(Rx_Buffer) , 0) ;
			if(recv_len == -1) {
				fprintf(stderr ,"Stream data recv() error\n");
				break ;
			}
			else {
				/* first 4 bytes are the chunk index; payload follows */
				ID = *((int *)Rx_Buffer) ;

				/* FIX: validate the network-supplied header before copying.
				 * The original trusted ID/recv_len and could overrun
				 * play_Audio_buf. */
				if(recv_len < 4 || ID < 0 ||
				   (size_t)ID * 1024 + (size_t)(recv_len - 4) > sizeof(play_Audio_buf)) {
					fprintf(stderr, "malformed audio packet dropped\n");
					continue;
				}

				memcpy((char *)play_Audio_buf + ID * 1024, &Rx_Buffer[4] , recv_len-4);
				remain_size -= (recv_len-4);

				/* receive finish: a full second of samples is assembled */
				if(remain_size <= 0) {
					for(i = 0; i < AUDIO_SAMPLE_RATE; i++) {
						*(FT_in + i) = play_Audio_buf[i] * 128;
					}

					fftw_execute(FT_plan);

					/* crude band-pass: zero the lowest 300 and everything
					 * from bin 7000 upward */
					for(i = 0 ; i < 300 ; i++) {
						FT_out[i][0] = 0;
						FT_out[i][1] = 0;
					}
					for(i = 7000 ; i < FOURIER_NUMBER ; i++) {
						FT_out[i][0] = 0;
						FT_out[i][1] = 0;
					}

					fftw_execute(IFT_plan);

					/* rescale (FFTW transforms are unnormalized) and clamp */
					for(i = 0; i < AUDIO_SAMPLE_RATE; i++) {
						play_Audio_buf[i] = (IFT_in[i] /AUDIO_SAMPLE_RATE) * 10 + 15000;
						if(play_Audio_buf[i] > 32767) play_Audio_buf[i] =32767;
					}


					/* Play Audio. FIX: use AUDIO_SAMPLE_RATE and the real
					 * pool size instead of the hard-coded 44100 and 10. */
					alBufferData(PlayBuffer[Buf_counter], AL_FORMAT_MONO16, (ALvoid *)play_Audio_buf ,sizeof(short) * (int)AUDIO_SAMPLE_RATE, (int)AUDIO_SAMPLE_RATE);
					alSourceStop(PlaySource);
					alSourcei(PlaySource, AL_BUFFER, PlayBuffer[Buf_counter]); 
					alSourcePlay(PlaySource);
					Buf_counter++;
					if(Buf_counter >= AUDIO_Rx_PLAY_BUFFER_COUNT) Buf_counter = 0;
					remain_size = AUDIO_SAMPLE_RATE * sizeof(short);
				}
			}
		}
		usleep(50000);
	}

AUDIO_RX_RELEASE :
	/*------------------------------------------*/
	/* FIX: tear down in dependency order. The original closed the device
	 * while its context was still current, and leaked the FFTW plans,
	 * FFTW buffers and the socket. */
	alSourceStop(PlaySource);
	alDeleteSources(1, &PlaySource); 
	alDeleteBuffers(AUDIO_Rx_PLAY_BUFFER_COUNT, PlayBuffer); 
	alcMakeContextCurrent(NULL);
	alcDestroyContext(PlayContext);
	alcCloseDevice(PlayDevice);
	fftw_destroy_plan(FT_plan);
	fftw_destroy_plan(IFT_plan);
	fftw_free(FT_in);
	fftw_free(IFT_in);
	fftw_free(FT_out);
	close(Rx_socket);
	 /*------------------------------------------*/
	printf("Audio Rx finish\n");
	pthread_exit(NULL);
}
コード例 #7
0
ファイル: audio.cpp プロジェクト: 600rr/tdesktop
// Periodic tick for all active voice messages: advances fade-in/fade-out
// gains, detects sources that stopped on their own, and emits progress /
// preload signals. Reschedules the timer while any message is still fading
// or playing.
void VoiceMessagesFader::onTimer() {
	bool hasFading = false, hasPlaying = false;
	QMutexLocker lock(&voicemsgsMutex);
	VoiceMessages *voice = audioVoice();
	if (!voice) return;

	for (int32 i = 0; i < AudioVoiceMsgSimultaneously; ++i) {
		VoiceMessages::Msg &m(voice->_data[i]);
		if (m.state == VoiceMessageStopped || m.state == VoiceMessagePaused || !m.source) continue;

		bool playing = false, fading = false;
		ALint pos = 0;
		ALint state = AL_INITIAL;
		alGetSourcei(m.source, AL_SAMPLE_OFFSET, &pos);
		alGetSourcei(m.source, AL_SOURCE_STATE, &state);
		if (!_checkALError()) {
			// OpenAL failure: drop this message and report the error.
			m.state = VoiceMessageStopped;
			emit error(m.audio);
		} else {
			// Transitional states fade the gain; steady playback just plays.
			switch (m.state) {
			case VoiceMessageFinishing:
			case VoiceMessagePausing:
			case VoiceMessageStarting:
			case VoiceMessageResuming:
				fading = true;
			break;
			case VoiceMessagePlaying:
				playing = true;
			break;
			}
			if (fading && (state == AL_PLAYING || !m.loading)) {
				if (state != AL_PLAYING) {
					// Source ran dry mid-fade: treat as stopped.
					fading = false;
					if (m.source) {
						alSourcef(m.source, AL_GAIN, 1);
						alSourceStop(m.source);
					}
					m.state = VoiceMessageStopped;
					emit audioStopped(m.audio);
				} else if (1000 * (pos + m.skipStart - m.started) >= AudioFadeDuration * AudioVoiceMsgFrequency) {
					// Fade complete: snap gain to 1 and finish the transition.
					fading = false;
					alSourcef(m.source, AL_GAIN, 1);
					switch (m.state) {
					case VoiceMessageFinishing: alSourceStop(m.source); m.state = VoiceMessageStopped; break;
					case VoiceMessagePausing: alSourcePause(m.source); m.state = VoiceMessagePaused; break;
					case VoiceMessageStarting:
					case VoiceMessageResuming:
						m.state = VoiceMessagePlaying;
						playing = true;
					break;
					}
				} else {
					// Mid-fade: gain ramps linearly with the played samples,
					// inverted for fade-out states.
					float64 newGain = 1000. * (pos + m.skipStart - m.started) / (AudioFadeDuration * AudioVoiceMsgFrequency);
					if (m.state == VoiceMessagePausing || m.state == VoiceMessageFinishing) {
						newGain = 1. - newGain;
					}
					// FIX: removed leftover debug scaffolding
					// (`if (newGain < 0) { int a = 0, b; b = a; }`) -- a
					// breakpoint anchor with no observable effect.
					alSourcef(m.source, AL_GAIN, newGain);
					LOG(("Now volume is: %1").arg(newGain));
				}
			} else if (playing && (state == AL_PLAYING || !m.loading)) {
				if (state != AL_PLAYING) {
					// Playback finished on its own.
					playing = false;
					if (m.source) {
						alSourceStop(m.source);
						alSourcef(m.source, AL_GAIN, 1);
					}
					m.state = VoiceMessageStopped;
					emit audioStopped(m.audio);
				}
			}
			// Emit a position update once we have advanced far enough.
			if (pos + m.skipStart - m.position >= AudioCheckPositionDelta) {
				m.position = pos + m.skipStart;
				emit playPositionUpdated(m.audio);
			}
			// Request more data shortly before the queued samples run out.
			if (!m.loading && m.skipEnd > 0 && m.position + AudioPreloadSamples + m.skipEnd > m.duration) {
				m.loading = true;
				emit needToPreload(m.audio);
			}
			if (playing) hasPlaying = true;
			if (fading) hasFading = true;
		}
	}
	// Keep ticking at the fast fade rate while fading, at the slower
	// position-check rate while merely playing; otherwise let the timer stop.
	if (hasFading) {
		_timer.start(AudioFadeTimeout);
	} else if (hasPlaying) {
		_timer.start(AudioCheckPositionTimeout);
	}
}
コード例 #8
0
ファイル: SoundEffect.cpp プロジェクト: iamcsharper/nxna
	void SoundEffectInstance::Stop()
	{
		// Only the OpenAL backend is implemented; elsewhere this is a no-op.
#ifdef NXNA_AUDIOENGINE_OPENAL
		alSourceStop(m_source);
#endif
	}
コード例 #9
0
ファイル: SoundManager.cpp プロジェクト: arian487/WetChess
void SoundManager::Stop(int src_index) {
  // Halt the source stored at the given index; AL_CHECK_ERR surfaces errors.
  // NOTE(review): src_index is not bounds-checked here -- callers must pass
  // a valid index into `sources`.
  AL_CHECK_ERR(alSourceStop(sources[src_index]));
}
コード例 #10
0
ファイル: GOSound.cpp プロジェクト: muhaos/MHEngine
void GOSound::stop() {
	// Nothing to stop when the sound was never made playable.
	if (!canPlay)
		return;
	alSourceStop(source);
}
コード例 #11
0
/// Stops playback; a subsequent play starts from the beginning of the sound.
void cSoundSourceOpenAl::Stop()
{
	alSourceStop(miId);
	CheckOpenAl();
}
コード例 #12
0
ファイル: Sound.cpp プロジェクト: GuillaumeBelz/NazaraEngine
void NzSound::Stop()
{
	// Halt the OpenAL source bound to this sound.
	alSourceStop(m_source);
}
コード例 #13
0
ファイル: Audio.cpp プロジェクト: bruni68510/Joyau
void AudioObject::stopSource()
{
   // Stop whatever this object's source is currently playing.
   alSourceStop(source);
}
コード例 #14
0
ファイル: title.cpp プロジェクト: lookin4/MonsterBreeder
// Title screen: one-time initialization of camera/monster/demo state, a
// scrolling title logo, and the START/EXIT menu. On START, hands control to
// `opening` via the global `func` pointer; on EXIT, terminates the program.
void title() {
	if (title_init == 0) {
		// First frame only: start music, reset camera, seed demo data.
		title_init = 1;
		alSourcePlay(opening_music->source);

		target->y = 0;
		target->x = 0;
		target->z = 0;

		eye->y = 0;
		eye->x = 0;
		eye->z = 1000;
		frame = 0;

		opening_init = 0;

		player->nengetu = 12;

		wcscpy(suezo->waza[0]->name, L"つばはき");
		wcscpy(suezo->waza[1]->name, L"しっぽアタック");
		wcscpy(monorisu->waza[0]->name, L"ビーム");
		wcscpy(monorisu->waza[1]->name, L"たおれこみ");

		suezo->waza[0]->image = tuba_image;
		suezo->waza[1]->image = sippo_image;
		monorisu->waza[0]->image = biimu_image;
		monorisu->waza[1]->image = taore_image;

		wcscpy(suezo->name, L"すえぞー");
		wcscpy(monorisu->name, L"ものりす");


		monster = suezo;

		enemy = monorisu;

		monster->type = game::PLAYER;
		enemy->type = game::ENEMY;

	}

	// Full-screen background quad.
	glColor4f(1, 1, 1, 1);
	title_background_image->changeImage();
	glBegin(GL_QUADS);
	{
		glTexCoord2f(0.f, 0.f);
		glVertex2f(-430.f, 430.f);
		glTexCoord2f(0.f, 1.f);
		glVertex2f(-430.f, -430.f);
		glTexCoord2f(1.f, 1.f);
		glVertex2f(430.f, -430.f);
		glTexCoord2f(1.f, 0.f);
		glVertex2f(430.f, 430.f);
	}
	glEnd();



	// Title logo, alpha-blended and scrolling down during the intro.
	title_image->changeImage();

	glEnable(GL_BLEND);
	glBlendFunc(
		GL_SRC_ALPHA,
		GL_ONE_MINUS_SRC_ALPHA
		);

	glPushMatrix();
	if (frame < 200) {
		// Intro scroll; Enter skips straight to the final position.
		// NOTE(review): `frame` is advanced outside this function -- confirm.
		if (lkn::InputManager::getInstance()->keyPless(0x0d)) {
			frame = 200;
		}
		title_movie = 200 + (400 - frame * 2);
	}
	glTranslatef(0, title_movie, 0);
	glBegin(GL_QUADS);
	{
		glTexCoord2f(0.f, 0.f);
		glVertex2f(-250.f, 250.f);
		glTexCoord2f(0.f, 1.f);
		glVertex2f(-250.f, -250.f);
		glTexCoord2f(1.f, 1.f);
		glVertex2f(250.f, -250.f);
		glTexCoord2f(1.f, 0.f);
		glVertex2f(250.f, 250.f);
	}
	glEnd();
	glPopMatrix();

	// Menu becomes interactive once the intro scroll has finished.
	if (frame >= 200) {
		// NOTE(review): both 'w' and 's' increment title_count; with two menu
		// entries (modulo TITLE_MAX) either key toggles the selection, so the
		// observable behavior is the same -- confirm TITLE_MAX == 2.
		if (lkn::InputManager::getInstance()->keyPless('w')) {
			alSourcePlay(select_music->source);
			title_count++;
		}
		if (lkn::InputManager::getInstance()->keyPless('s')) {
			alSourcePlay(select_music->source);
			title_count++;
		}
		title_count = (TITLE_MAX + title_count) % TITLE_MAX;

		glBlendFunc(
			GL_ONE,
			GL_ONE
			);
		// Highlight intensity for each entry: brighter when selected.
		float hajimekara = (1 - title_count)*0.3f + 0.1f;
		float owari = title_count * 0.3f + 0.1f;
		// FIX: the original called glDisable(GL_TEXTURE) -- GL_TEXTURE is a
		// matrix-mode enum, so the call raised GL_INVALID_ENUM and left
		// texturing enabled for the solid menu quads below.
		glDisable(GL_TEXTURE_2D);
		glColor4f(hajimekara, hajimekara, hajimekara, 1);
		glBegin(GL_QUADS);
		{
			glVertex2f(-70.f, -140.f);
			glVertex2f(70.f, -140.f);
			glVertex2f(70.f, -210.f);
			glVertex2f(-70.f, -210.f);
		}
		glColor4f(owari, owari, owari, 1);
		{
			glVertex2f(-70.f, -212.f);
			glVertex2f(70.f, -212.f);
			glVertex2f(70.f, -282.f);
			glVertex2f(-70.f, -282.f);
		}
		glEnd();
		glDisable(GL_BLEND);
		glColor3f(0.3f, 0.3f, 0.3f);
		font->ChangeSize(lkn::TYPE_NORMAL);
		font->DrawStringW(-67, -200, L" はじめから");
		font->DrawStringW(-67, -270, L" EXIT");

		// Enter confirms the highlighted entry.
		if (lkn::InputManager::getInstance()->keyPless(0x0d)) {
			if (title_count == TITLE_START) {
				alSourcePlay(decision_music->source);
				alSourceStop(opening_music->source);
				title_init = 0;
				func = opening;
			}
			else if (title_count == TITLE_EXIT) {
				alSourcePlay(decision_music->source);
				exit(0);
			}
		}
	}

	glMatrixMode(GL_PROJECTION);          /* select the projection matrix */
	glLoadIdentity();                     /* reset it to identity */
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	glOrtho(
		0,//GLdouble left,
		300,//GLdouble right,
		0,//GLdouble bottom,
		300,//GLdouble top,
		1,//GLdouble zNear, 
		-1//GLdouble zFar
		);

	// Footer hint line.
	glColor3f(0, 0, 0);
	min_font->ChangeSize(lkn::TYPE_MIN);
	min_font->DrawStringW(10, 285, L"w:↑ s:↓ Enter:決定");
}
コード例 #15
0
ファイル: al.cpp プロジェクト: 2asoft/xray-16
//*****************************************************************************
// alSourceStop
//*****************************************************************************
//
// Exported trampoline for the OpenAL entry point. AL_VOID_FXN is a project
// macro that dispatches the wrapped call (presumably to the selected driver
// implementation, adding tracing/error bookkeeping -- the macro is defined
// elsewhere; confirm it does not simply re-invoke this same symbol).
ALAPI ALvoid ALAPIENTRY alSourceStop(ALuint sourceName)
{
    AL_VOID_FXN(alSourceStop(sourceName));
}
コード例 #16
0
ファイル: PlayStream.cpp プロジェクト: 9heart/DT3
int main()
{
	ALuint		    uiBuffers[NUMBUFFERS];
	ALuint		    uiSource;
	ALuint			uiBuffer;
	ALint			iState;
	CWaves *		pWaveLoader = NULL;
	WAVEID			WaveID;
	ALint			iLoop;
	ALint			iBuffersProcessed, iTotalBuffersProcessed, iQueuedBuffers;
	WAVEFORMATEX	wfex;
	unsigned long	ulDataSize = 0;
	unsigned long	ulFrequency = 0;
	unsigned long	ulFormat = 0;
	unsigned long	ulBufferSize;
	unsigned long	ulBytesWritten;
	void *			pData = NULL;

	// Initialize Framework
	ALFWInit();

	ALFWprintf("PlayStream Test Application\n");

	if (!ALFWInitOpenAL())
	{
		ALFWprintf("Failed to initialize OpenAL\n");
		ALFWShutdown();
		return 0;
	}

    // Generate some AL Buffers for streaming
	alGenBuffers( NUMBUFFERS, uiBuffers );

	// Generate a Source to playback the Buffers
    alGenSources( 1, &uiSource );

	// Create instance of WaveLoader class
	pWaveLoader = new CWaves();
	if ((pWaveLoader) && (SUCCEEDED(pWaveLoader->OpenWaveFile(ALFWaddMediaPath(TEST_WAVE_FILE), &WaveID))))
	{
		pWaveLoader->GetWaveSize(WaveID, &ulDataSize);
		pWaveLoader->GetWaveFrequency(WaveID, &ulFrequency);
		pWaveLoader->GetWaveALBufferFormat(WaveID, &alGetEnumValue, &ulFormat);

		// Queue 250ms of audio data
		pWaveLoader->GetWaveFormatExHeader(WaveID, &wfex);
		ulBufferSize = wfex.nAvgBytesPerSec >> 2;

		// IMPORTANT : The Buffer Size must be an exact multiple of the BlockAlignment ...
		ulBufferSize -= (ulBufferSize % wfex.nBlockAlign);

		if (ulFormat != 0)
		{
			pData = malloc(ulBufferSize);
			if (pData)
			{
				// Set read position to start of audio data
				pWaveLoader->SetWaveDataOffset(WaveID, 0);

				// Fill all the Buffers with audio data from the wavefile
				for (iLoop = 0; iLoop < 4; iLoop++)
				{
					if (SUCCEEDED(pWaveLoader->ReadWaveData(WaveID, pData, ulBufferSize, &ulBytesWritten)))
					{
						alBufferData(uiBuffers[iLoop], ulFormat, pData, ulBytesWritten, ulFrequency);
						alSourceQueueBuffers(uiSource, 1, &uiBuffers[iLoop]);
					}
				}

				// Start playing source
				alSourcePlay(uiSource);

				iTotalBuffersProcessed = 0;

				while (!ALFWKeyPress())
				{
					Sleep( SERVICE_UPDATE_PERIOD );

					// Request the number of OpenAL Buffers have been processed (played) on the Source
					iBuffersProcessed = 0;
					alGetSourcei(uiSource, AL_BUFFERS_PROCESSED, &iBuffersProcessed);

					// Keep a running count of number of buffers processed (for logging purposes only)
					iTotalBuffersProcessed += iBuffersProcessed;
					ALFWprintf("Buffers Processed %d\r", iTotalBuffersProcessed);

					// For each processed buffer, remove it from the Source Queue, read next chunk of audio
					// data from disk, fill buffer with new data, and add it to the Source Queue
					while (iBuffersProcessed)
					{
						// Remove the Buffer from the Queue.  (uiBuffer contains the Buffer ID for the unqueued Buffer)
						uiBuffer = 0;
						alSourceUnqueueBuffers(uiSource, 1, &uiBuffer);

						// Read more audio data (if there is any)
						pWaveLoader->ReadWaveData(WaveID, pData, ulBufferSize, &ulBytesWritten);
						if (ulBytesWritten)
						{
							// Copy audio data to Buffer
							alBufferData(uiBuffer, ulFormat, pData, ulBytesWritten, ulFrequency);
							// Queue Buffer on the Source
							alSourceQueueBuffers(uiSource, 1, &uiBuffer);
						}

						iBuffersProcessed--;
					}

					// Check the status of the Source.  If it is not playing, then playback was completed,
					// or the Source was starved of audio data, and needs to be restarted.
					alGetSourcei(uiSource, AL_SOURCE_STATE, &iState);
					if (iState != AL_PLAYING)
					{
						// If there are Buffers in the Source Queue then the Source was starved of audio
						// data, so needs to be restarted (because there is more audio data to play)
						alGetSourcei(uiSource, AL_BUFFERS_QUEUED, &iQueuedBuffers);
						if (iQueuedBuffers)
						{
							alSourcePlay(uiSource);
						}
						else
						{
							// Finished playing
							break;
						}
					}
				}

				// Stop the Source and clear the Queue
				alSourceStop(uiSource);
				alSourcei(uiSource, AL_BUFFER, 0);

				// Release temporary storage
				free(pData);
				pData = NULL;
			}
			else
			{
				ALFWprintf("Out of memory\n");
			}
		}
		else
		{
			ALFWprintf("Unknown Audio Buffer format\n");
		}

		// Close Wave Handle
		pWaveLoader->DeleteWaveFile(WaveID);
	}
コード例 #17
0
ファイル: SoundStream.cpp プロジェクト: Cucurbitace/attract
////////////////////////////////////////////////////////////
/// \brief Streaming-thread body: keeps the source's buffer queue filled
/// with freshly decoded chunks until the stream ends or m_isStreaming is
/// cleared, restarting the source if it was starved, then stops playback
/// and releases all buffers.
////////////////////////////////////////////////////////////
void SoundStream::streamData()
{
    // Create the buffers
    alCheck(alGenBuffers(BufferCount, m_buffers));
    for (int i = 0; i < BufferCount; ++i)
        m_endBuffers[i] = false;

    // Fill the queue
    bool requestStop = fillQueue();

    // Play the sound
    alCheck(alSourcePlay(m_source));

    while (m_isStreaming)
    {
        // The stream has been interrupted!
        if (SoundSource::getStatus() == Stopped)
        {
            if (!requestStop)
            {
                // Just continue (the source was starved; restart it)
                alCheck(alSourcePlay(m_source));
            }
            else
            {
                // End streaming
                m_isStreaming = false;
            }
        }

        // Get the number of buffers that have been processed (ie. ready for reuse)
        ALint nbProcessed = 0;
        alCheck(alGetSourcei(m_source, AL_BUFFERS_PROCESSED, &nbProcessed));

        while (nbProcessed--)
        {
            // Pop the first unused buffer from the queue
            ALuint buffer;
            alCheck(alSourceUnqueueBuffers(m_source, 1, &buffer));

            // Find its number
            unsigned int bufferNum = 0;
            for (int i = 0; i < BufferCount; ++i)
                if (m_buffers[i] == buffer)
                {
                    bufferNum = i;
                    break;
                }

            // Retrieve its size and add it to the samples count
            if (m_endBuffers[bufferNum])
            {
                // This was the last buffer: reset the sample count
                m_samplesProcessed = 0;
                m_endBuffers[bufferNum] = false;
            }
            else
            {
                // Size in bytes / bytes per sample = samples in this buffer
                ALint size, bits;
                alCheck(alGetBufferi(buffer, AL_SIZE, &size));
                alCheck(alGetBufferi(buffer, AL_BITS, &bits));
                m_samplesProcessed += size / (bits / 8);
            }

            // Fill it and push it back into the playing queue
            if (!requestStop)
            {
                if (fillAndPushBuffer(bufferNum))
                    requestStop = true;
            }
        }

        // Leave some time for the other threads if the stream is still playing
        if (SoundSource::getStatus() != Stopped)
            sleep(milliseconds(10));
    }

    // Stop the playback
    alCheck(alSourceStop(m_source));

    // Unqueue any buffer left in the queue
    clearQueue();

    // Delete the buffers
    alCheck(alSourcei(m_source, AL_BUFFER, 0));
    alCheck(alDeleteBuffers(BufferCount, m_buffers));
}
コード例 #18
0
// Starts (or overrides) playback of Sound in the requested Slot.
// Returns 1 when a source was (re)started, 0 when the sound was dropped
// (slot busy without the override bit, or priority too low to steal a
// source). Passing Sound == (USound*)-1 only stops the chosen slot's source.
UBOOL UOpenALAudioSubsystem::PlaySound
(
	AActor*	Actor,
	INT		Slot,
	USound*	Sound,
	FVector	Location,
	FLOAT	Volume,
	FLOAT	Radius,
	FLOAT	Pitch,
	UBOOL	Looping
)
{
	guard(UOpenALAudioSubsystem::PlaySound);

	check(Radius);
	if( !Viewport || !Sound )
		return 0;

	// Allocate a new slot if requested.
	// XXX: What's the logic here?
	if( (Slot&14)==2*SLOT_None )
		Slot = 16 * --FreeSlot;

	// Compute our priority.
	FLOAT Priority = SoundPriority( Location, Volume, Radius );

	// Pick a source: an exact slot match wins; otherwise steal the
	// lowest-priority source that ranks below us.
	INT   Index        = -1;
	FLOAT BestPriority = Priority;
	for( INT i=0; i<NumSources; i++ )
	{
		FAudioSource& Source = Sources[i];

		// Check if the slot is already in use (low bit is the no-override flag).
		if( (Source.Slot&~1)==(Slot&~1) )
		{
			// Stop processing if not told to override.
			if( Slot&1 )
				return 0;

			// Override the existing sound.
			Index = i;
			break;
		}

		// Find the lowest priority sound below our own priority
		// and override it. (Unless the above applies.)
		if( Source.Priority<=BestPriority )
		{
			Index = i;
			BestPriority = Source.Priority;
		}
	}

	// Didn't match an existing slot, or couldn't override a lower
	// priority sound. Give up.
	if( Index==-1 )
		return 0;

	// Stop the old sound.
	FAudioSource& Source = Sources[Index];
	alSourceStop( Source.Id );

	// And start the new sound.
	if( Sound!=(USound*)-1 )
	{
		const ALuint Id = Source.Id;
		alSourcei( Id, AL_BUFFER,		GetBufferFromUSound(Sound)->Id );
		alSourcef( Id, AL_GAIN,			Volume );
		alSourcef( Id, AL_MAX_DISTANCE,	Radius );
		alSourcef( Id, AL_PITCH,		Pitch );
		alSourcei( Id, AL_LOOPING,		Looping ? AL_TRUE : AL_FALSE );
		if( Actor == Viewport->Actor )
		{
			// Don't attenuate or position viewport actor sounds at all.
			// (These are sounds like the announcer, menu clicks, etc.)
			alSource3f(	Id, AL_POSITION,		0.f, 0.f, 0.f );
			alSource3f(	Id, AL_VELOCITY,		0.f, 0.f, 0.f );
			alSourcef(	Id, AL_ROLLOFF_FACTOR,	0.f );
			alSourcei(	Id, AL_SOURCE_RELATIVE,	AL_TRUE );
			Actor = NULL;
		}
		else
		{
			// Negate the above
			alSourcef(	Id, AL_ROLLOFF_FACTOR,	1.f );
			alSourcei(	Id, AL_SOURCE_RELATIVE,	AL_FALSE );
			// Update will override Location with Actor's Location anyways.
			// XXX: Should we use Location and not attach to Actor instead?
			if ( Actor )
			{
				alSourcefv(	Id, AL_POSITION,	&Actor->Location.X );
				alSourcefv(	Id, AL_VELOCITY,	&Actor->Velocity.X );
			}
			else
			{
				ALfloat ZeroVelocity[3] = { 0.f, 0.f, 0.f };
				alSourcefv(	Id, AL_POSITION,	&Location.X );
				alSourcefv(	Id, AL_VELOCITY,	ZeroVelocity );
			}
		}
		alSourcePlay( Id );
		// Record the new occupant of this source slot.
		Source.Fill( Actor, Sound, Slot, Location, Volume, Radius, Priority );
	}

	return 1;

	unguard;
}
コード例 #19
0
	void SimpleAudioEngine::stopEffect(unsigned int nSoundId)
	{
		// Effect ids are OpenAL source names, so they can be stopped directly.
		// NOTE(review): assumes nSoundId is a valid source name -- confirm.
		alSourceStop(nSoundId);
		checkALError("stopEffect");
	}
コード例 #20
0
ファイル: audio.cpp プロジェクト: Pik-9/qTox
void Audio::stopLoop()
{
    // Serialize against other audio operations.
    QMutexLocker locker(&audioLock);
    // Clear the looping flag first so the source cannot restart, then stop it.
    alSourcei(alMainSource, AL_LOOPING, AL_FALSE);
    alSourceStop(alMainSource);
}
コード例 #21
0
ファイル: audio.cpp プロジェクト: 600rr/tdesktop
// Loads the next chunk of an opus voice message into the OpenAL buffer queue.
// Finds the playing slot for `audio`, (re)creates its opus decoder when the
// underlying file/data changed, decodes up to AudioVoiceMsgBufferSize bytes
// of PCM, and queues it on the slot's source, creating the source/buffers on
// first use. On a fresh start it unqueues everything first.
void VoiceMessagesLoader::onLoad(AudioData *audio) {
	bool started = false;
	int32 audioindex = -1;
	Loader *l = 0;
	Loaders::iterator j = _loaders.end();
	{
		QMutexLocker lock(&voicemsgsMutex);
		VoiceMessages *voice = audioVoice();
		if (!voice) return;

		// Locate the slot playing this audio and (re)build its Loader.
		for (int32 i = 0; i < AudioVoiceMsgSimultaneously; ++i) {
			VoiceMessages::Msg &m(voice->_data[i]);
			if (m.audio != audio || !m.loading) continue;

			audioindex = i;
			j = _loaders.find(audio);
			// Discard a stale loader if the backing file/data changed.
			if (j != _loaders.end() && (j.value()->fname != m.fname || j.value()->data.size() != m.data.size())) {
				delete j.value();
				_loaders.erase(j);
				j = _loaders.end();
			}
			if (j == _loaders.end()) {
				// Fresh start: open the opus stream from file or memory.
				l = (j = _loaders.insert(audio, new Loader())).value();
				l->fname = m.fname;
				l->data = m.data;

				int ret;
				if (m.data.isEmpty()) {
					l->file = op_open_file(m.fname.toUtf8().constData(), &ret);
				} else {
					l->file = op_open_memory((const unsigned char*)m.data.constData(), m.data.size(), &ret);
				}
				if (!l->file) {
					LOG(("Audio Error: op_open_file failed for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(ret));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				ogg_int64_t duration = op_pcm_total(l->file, -1);
				if (duration < 0) {
					LOG(("Audio Error: op_pcm_total failed to get full duration for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(duration));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				m.duration = duration;
				m.skipStart = 0;
				m.skipEnd = duration;
				m.position = 0;
				m.started = 0;
				started = true;
			} else {
				// Existing loader: nothing left to decode when skipEnd == 0.
				if (!m.skipEnd) continue;
				l = j.value();
			}
			break;
		}
	}

	if (j == _loaders.end()) {
		LOG(("Audio Error: trying to load part of audio, that is not playing at the moment"));
		emit error(audio);
		return;
	}
	if (started) {
		l->pcm_offset = op_pcm_tell(l->file);
		l->pcm_print_offset = l->pcm_offset - AudioVoiceMsgFrequency;
	}

	bool finished = false;
    DEBUG_LOG(("Audio Info: reading buffer for file '%1', data size '%2', current pcm_offset %3").arg(l->fname).arg(l->data.size()).arg(l->pcm_offset));

	// Decode PCM until one buffer's worth is collected or the stream ends.
	QByteArray result;
	int64 samplesAdded = 0;
	while (result.size() < AudioVoiceMsgBufferSize) {
		opus_int16 pcm[AudioVoiceMsgFrequency * AudioVoiceMsgChannels];

		int ret = op_read_stereo(l->file, pcm, sizeof(pcm) / sizeof(*pcm));
		if (ret < 0) {
			// Decode failure: mark the slot stopped before reporting.
			{
				QMutexLocker lock(&voicemsgsMutex);
				VoiceMessages *voice = audioVoice();
				if (voice) {
					VoiceMessages::Msg &m(voice->_data[audioindex]);
					if (m.audio == audio) {
						m.state = VoiceMessageStopped;
					}
				}
			}
			LOG(("Audio Error: op_read_stereo failed, error code %1").arg(ret));
			return loadError(j);
		}

		// Track chained-stream link changes (metadata / unseekable offsets).
		int li = op_current_link(l->file);
		if (li != l->prev_li) {
			const OpusHead *head = op_head(l->file, li);
			const OpusTags *tags = op_tags(l->file, li);
			for (int32 ci = 0; ci < tags->comments; ++ci) {
				const char *comment = tags->user_comments[ci];
				if (opus_tagncompare("METADATA_BLOCK_PICTURE", 22, comment) == 0) {
					OpusPictureTag pic;
					int err = opus_picture_tag_parse(&pic, comment);
					if (err >= 0) {
						opus_picture_tag_clear(&pic);
					}
				}
			}
			if (!op_seekable(l->file)) {
				l->pcm_offset = op_pcm_tell(l->file) - ret;
			}
		}
		if (li != l->prev_li || l->pcm_offset >= l->pcm_print_offset + AudioVoiceMsgFrequency) {
			l->pcm_print_offset = l->pcm_offset;
		}
		l->pcm_offset = op_pcm_tell(l->file);

		if (!ret) {
			DEBUG_LOG(("Audio Info: read completed"));
			finished = true;
			break;
		}
		result.append((const char*)pcm, sizeof(*pcm) * ret * AudioVoiceMsgChannels);
		l->prev_li = li;
		samplesAdded += ret;

		// Bail out if the slot was repurposed while we were decoding.
		{
			QMutexLocker lock(&voicemsgsMutex);
			VoiceMessages *voice = audioVoice();
			if (!voice) return;

			VoiceMessages::Msg &m(voice->_data[audioindex]);
			if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
				LOG(("Audio Error: playing changed while loading"));
				m.state = VoiceMessageStopped;
				return loadError(j);
			}
		}
	}

	QMutexLocker lock(&voicemsgsMutex);
	VoiceMessages *voice = audioVoice();
	if (!voice) return;

	VoiceMessages::Msg &m(voice->_data[audioindex]);
	if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
		LOG(("Audio Error: playing changed while loading"));
		m.state = VoiceMessageStopped;
		return loadError(j);
	}

	if (started) {
		// Fresh start: drain any previously queued buffers from the source.
		if (m.source) {
			alSourceStop(m.source);
			for (int32 i = 0; i < 3; ++i) {
				if (m.samplesCount[i]) {
					alSourceUnqueueBuffers(m.source, 1, m.buffers + i);
					m.samplesCount[i] = 0;
				}
			}
			m.nextBuffer = 0;
		}
	}
	if (samplesAdded) {
		// Lazily create the source and its 3-buffer rotation.
		if (!m.source) {
			alGenSources(1, &m.source);
			alSourcef(m.source, AL_PITCH, 1.f);
			alSourcef(m.source, AL_GAIN, 1.f);
			alSource3f(m.source, AL_POSITION, 0, 0, 0);
			alSource3f(m.source, AL_VELOCITY, 0, 0, 0);
			alSourcei(m.source, AL_LOOPING, 0);
		}
		if (!m.buffers[m.nextBuffer]) alGenBuffers(3, m.buffers);
		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}

		// Reuse the next buffer in rotation; account for the samples it held.
		if (m.samplesCount[m.nextBuffer]) {
			alSourceUnqueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
			m.skipStart += m.samplesCount[m.nextBuffer];
		}

		m.samplesCount[m.nextBuffer] = samplesAdded;
		alBufferData(m.buffers[m.nextBuffer], AL_FORMAT_STEREO16, result.constData(), result.size(), AudioVoiceMsgFrequency);
		alSourceQueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
		m.skipEnd -= samplesAdded;

		m.nextBuffer = (m.nextBuffer + 1) % 3;

		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}
	} else {
		finished = true;
	}
	if (finished) {
		// Stream fully decoded: fix up the final duration bookkeeping.
		m.skipEnd = 0;
		m.duration = m.skipStart + m.samplesCount[0] + m.samplesCount[1] + m.samplesCount[2];
	}
	m.loading = false;
	// Kick the source if it should be audible but is not playing.
	if (m.state == VoiceMessageResuming || m.state == VoiceMessagePlaying || m.state == VoiceMessageStarting) {
		ALint state = AL_INITIAL;
		alGetSourcei(m.source, AL_SOURCE_STATE, &state);
		if (_checkALError()) {
			if (state != AL_PLAYING) {
				alSourcePlay(m.source);
				emit needToCheck();
			}
		}
	}
}
コード例 #22
0
ファイル: openal-audio.c プロジェクト: FredrikL/PandaUI
/*
 * Playback thread entry point: opens the default OpenAL device, prebuffers
 * NUM_BUFFERS packets from the audio fifo `aux`, then streams forever,
 * recycling buffers as the source consumes them.  When the sample rate or
 * channel count of an incoming packet differs from what is currently
 * buffered, the source queue is flushed and rebuilt.
 *
 * aux: an audio_fifo_t* producer queue (never returns; runs until exit).
 */
static void* audio_start(void *aux)
{
	audio_fifo_t *af = aux;
	audio_fifo_data_t *afd;
	unsigned int frame = 0;
	ALCdevice *device = NULL;
	ALCcontext *context = NULL;
	ALuint buffers[NUM_BUFFERS];
	ALuint source;
	ALint processed;
	ALenum error;
	ALint rate;
	ALint channels;
	device = alcOpenDevice(NULL); /* Use the default device */
	if (!device) error_exit("failed to open device");
	context = alcCreateContext(device, NULL);
	alcMakeContextCurrent(context);
	alListenerf(AL_GAIN, 1.0f);
	alDistanceModel(AL_NONE);
	alGenBuffers((ALsizei)NUM_BUFFERS, buffers);
	alGenSources(1, &source);

	/* First prebuffer some audio */
	queue_buffer(source, af, buffers[0]);
	queue_buffer(source, af, buffers[1]);
	queue_buffer(source, af, buffers[2]);
	for (;;) {

		alSourcePlay(source);
		for (;;) {
			/* Wait for some audio to play */
			do {
				alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
				usleep(100);
			} while (!processed);

			/* Remove old audio from the queue.. */
			alSourceUnqueueBuffers(source, 1, &buffers[frame % 3]);

			/* and queue some more audio */
			afd = audio_get(af);
			alGetBufferi(buffers[frame % 3], AL_FREQUENCY, &rate);
			alGetBufferi(buffers[frame % 3], AL_CHANNELS, &channels);
			if (afd->rate != rate || afd->channels != channels) {
				printf("rate or channel count changed, resetting\n");
				/* afd is re-buffered (and freed) after the reset below */
				break;
			}
			alBufferData(buffers[frame % 3],
						 afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
						 afd->samples,
						 afd->nsamples * afd->channels * sizeof(short),
						 afd->rate);

			/* alBufferData copies the sample data, so the packet can be
			 * released now — otherwise every fifo packet leaks. */
			free(afd);

			alSourceQueueBuffers(source, 1, &buffers[frame % 3]);

			/* al* calls report through alGetError(); alcGetError() only
			 * covers alc* (context/device) calls and would miss these. */
			if ((error = alGetError()) != AL_NO_ERROR) {
				printf("openal al error: %d\n", error);
				exit(1);
			}
			frame++;
		}
		/* Format or rate changed, so we need to reset all buffers */
		alSourcei(source, AL_BUFFER, 0);
		alSourceStop(source);

		/* Make sure we don't lose the audio packet that caused the change */
		alBufferData(buffers[0],
					 afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
					 afd->samples,
					 afd->nsamples * afd->channels * sizeof(short),
					 afd->rate);
		free(afd); /* copied by alBufferData above */

		alSourceQueueBuffers(source, 1, &buffers[0]);
		queue_buffer(source, af, buffers[1]);
		queue_buffer(source, af, buffers[2]);
		frame = 0;
	}
}
コード例 #23
0
/* Stop playback on the OpenAL source wrapped by the opaque handle.
 * srcobj: an audio_Object that holds a raw ALuint source name. */
void
openAL_SourceStop (audio_Object srcobj)
{
	ALuint source = (ALuint) srcobj;
	alSourceStop (source);
}
コード例 #24
0
ファイル: openal-audio.c プロジェクト: Malrok/jahspotify
/*
 * Playback thread entry point (jahspotify variant): opens the default
 * OpenAL device, prebuffers NUM_BUFFERS packets from the fifo `aux`, then
 * streams continuously, recycling buffers as they finish playing.  A change
 * in sample rate or channel count flushes the queue and rebuilds it.
 *
 * aux: an audio_fifo_t* producer queue.
 * Returns NULL on an unrecoverable AL error; otherwise loops forever.
 * NOTE(review): `source` is not declared locally — presumably a file-scope
 * ALuint; confirm against the rest of the file.
 */
static void* audio_start(void *aux)
{
    audio_fifo_t *af = aux;
    audio_fifo_data_t *afd;
    ALCdevice *device = NULL;
    ALCcontext *context = NULL;
    ALuint buffers[NUM_BUFFERS];
    ALint processed;
    ALenum error;
    ALint rate;
    ALint channels;

    device = alcOpenDevice(NULL); /* Use the default device */
    if (!device) error_exit("failed to open device");
    context = alcCreateContext(device, NULL);
    alcMakeContextCurrent(context);
    alListenerf(AL_GAIN, 1.0f);
    alDistanceModel(AL_NONE);
    alGenBuffers((ALsizei)NUM_BUFFERS, buffers);
    alGenSources(1, &source);

    /* First prebuffer some audio */
    queue_buffer(source, af, buffers[0]);
    queue_buffer(source, af, buffers[1]);
    queue_buffer(source, af, buffers[2]);
    for (;;) {

        alSourcePlay(source);
        for (;;) {
            /* Wait for some audio to play */
            alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
            if (processed <= 0)
            {
                usleep(200);
                continue;
            }

            /* Remove old audio from the queue.. */
            ALuint buffer;

            alSourceUnqueueBuffers(source, 1, &buffer);

            /* and queue some more audio */
            afd = audio_get(af);

            alGetBufferi(buffer, AL_FREQUENCY, &rate);
            alGetBufferi(buffer, AL_CHANNELS, &channels);
            if (afd->rate != rate || afd->channels != channels)
            {
                log_debug("openal","audio_start","rate or channel count changed, resetting");
                break;
            }

            alBufferData(buffer,
                afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
                afd->samples,
                afd->nsamples * afd->channels * sizeof(int16_t),
                afd->rate);

            /* alBufferData copies the samples; release the packet now. */
            free(afd);

            /* Reuse the outer `error` — a fresh inner declaration would
             * shadow it and invite exactly the bug fixed below. */
            error = alGetError();
            if (error != AL_NO_ERROR)
            {
                log_error("openal","audio_start","Error buffering: %s", alGetString(error));
                return NULL;
            }

            alSourceQueueBuffers(source, 1, &buffer);

            /* BUGFIX: test the saved code, not a second alGetError() call —
             * the first call clears the error state, so re-calling always
             * yielded AL_NO_ERROR and the check could never fire. */
            error = alGetError();
            if (error != AL_NO_ERROR)
            {
                log_error("openal","audio_start","Error queing buffering: %s", alGetString(error));
                return NULL;
            }

            /* Use a dedicated variable for the source state instead of
             * recycling `processed`, which holds a buffer count. */
            ALint state;
            alGetSourcei(source, AL_SOURCE_STATE, &state);
            if (state != AL_PLAYING)
            {
                // Resume playing
                alSourcePlay(source);
            }

            /* al* calls report via alGetError(); alcGetError() only covers
             * alc* (device/context) entry points. */
            if ((error = alGetError()) != AL_NO_ERROR) {
                log_error("openal","audio_start","Error queing buffering: %s", alGetString(error));
                exit(1);
            }

        }

        /* Format or rate changed, so we need to reset all buffers */
        alSourcei(source, AL_BUFFER, 0);
        alSourceStop(source);

        /* Make sure we don't lose the audio packet that caused the change */
        alBufferData(buffers[0],
                     afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
                     afd->samples,
                     afd->nsamples * afd->channels * sizeof(short),
                     afd->rate);

        free(afd);

        alSourceQueueBuffers(source, 1, &buffers[0]);
        queue_buffer(source, af, buffers[1]);
        queue_buffer(source, af, buffers[2]);

    }
}
コード例 #25
0
/* LWJGL JNI trampoline: resolve the native alSourceStop entry point from the
 * function address passed in by the Java side and invoke it. */
JNIEXPORT void JNICALL Java_org_lwjgl_openal_AL10_nalSourceStop(JNIEnv *__env, jclass clazz, jint source, jlong __functionAddress) {
	UNUSED_PARAMS(__env, clazz)
	alSourceStopPROC stopFn = (alSourceStopPROC)(intptr_t)__functionAddress;
	stopFn(source);
}
コード例 #26
0
bool SoundInstance::stop()
{
	alGetError();
	alSourceStop(mSource->getALSource());
	return SoundGeneral::checkAlError("Stopping sound instance.");
}
コード例 #27
0
ファイル: SoundChannel.cpp プロジェクト: A-K/naali
void SoundChannel::QueueBuffers()
{
    // See that we do have waiting sounds and they're ready to play
    AudioAssetPtr pending = pending_sounds_.size() > 0 ? pending_sounds_.front() : AudioAssetPtr();

    if (!pending)
        return;
    
    // Create source now if did not exist already
    if (!CreateSource())
    {
        state_ = Stopped;
        pending_sounds_.clear();
        return;
    }
    
    bool queued = false;
    
    // Buffer pending sounds, move them to playing vector
    while(pending_sounds_.size() > 0)
    {
        AudioAssetPtr sound = pending_sounds_.front();
        if (!sound)
        {
            pending_sounds_.pop_front();
            continue;
        }
        ALuint buffer = sound->GetHandle();
        // If no valid handle yet, cannot play this one, break out
        if (!buffer)
            return;
        
        alGetError();
        alSourceQueueBuffers(handle_, 1, &buffer);
        ALenum error = alGetError();
        
        if (error != AL_NONE)
        {
            // If queuing fails, we may have changed sound format. Stop, flush queue & retry
            alSourceStop(handle_);
            alSourcei(handle_, AL_BUFFER, 0);
            alSourceQueueBuffers(handle_, 1, &buffer);
            ALenum error = alGetError();
            if (error != AL_NONE)
                LogError("Could not queue OpenAL sound buffer: " + ToString<int>(error));
            else
            {
                playing_sounds_.push_back(sound);
                queued = true;
            }
        }
        else
        {
            playing_sounds_.push_back(sound);
            queued = true;
        }
        
        pending_sounds_.pop_front();
    }
    
    // If at least one sound queued, start playback if not already playing
    if (queued)
    {
        ALint playing;
        alGetSourcei(handle_, AL_SOURCE_STATE, &playing);
        if (playing != AL_PLAYING)
            alSourcePlay(handle_);
        state_ = Playing;
    }
}
コード例 #28
0
ファイル: AEAudio.c プロジェクト: Oddity007/AE
/* Removes `source` from the context's source array, then stops playback and
 * releases the underlying OpenAL source object. */
void AEAudioContextSourceDelete(AEAudioContext* self, ALuint source){
	/* Drop our bookkeeping entry first, then tear down the AL object. */
	AEArrayRemoveBytes(&self->sources, &source);
	alSourceStop(source);
	alDeleteSources(1, &source);
}